diff --git a/Documentation/admin-guide/kernel-parameters.rst b/Documentation/admin-guide/kernel-parameters.rst
index b2598cc9834c3cbacfd9b3b1d885ed69c4c4c133..7242cbda15dd3bf8c4debea387ff2c8e376aeb60 100644
--- a/Documentation/admin-guide/kernel-parameters.rst
+++ b/Documentation/admin-guide/kernel-parameters.rst
@@ -109,6 +109,7 @@ parameter is applicable::
 	IPV6	IPv6 support is enabled.
 	ISAPNP	ISA PnP code is enabled.
 	ISDN	Appropriate ISDN support is enabled.
+	ISOL	CPU Isolation is enabled.
 	JOY	Appropriate joystick support is enabled.
 	KGDB	Kernel debugger support is enabled.
 	KVM	Kernel Virtual Machine support is enabled.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 6571fbfdb2a1527c25b3a01e9c4228c84adce639..af7104aaffd92f1eb3db02df44ffc33e7df78adc 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -328,11 +328,15 @@
 			not play well with APC CPU idle - disable it if you have
 			APC and your system crashes randomly.
 
-	apic=		[APIC,X86-32] Advanced Programmable Interrupt Controller
+	apic=		[APIC,X86] Advanced Programmable Interrupt Controller
 			Change the output verbosity whilst booting
 			Format: { quiet (default) | verbose | debug }
 			Change the amount of debugging information output
 			when initialising the APIC and IO-APIC components.
+			For X86-32, this can also be used to specify an APIC
+			driver name.
+			Format: apic=driver_name
+			Example: apic=bigsmp
 
 	apic_extnmi=	[APIC,X86] External NMI delivery setting
 			Format: { bsp (default) | all | none }
@@ -1737,7 +1741,7 @@
 	isapnp=		[ISAPNP]
 			Format: <RDP>,<reset>,<pci_scan>,<verbosity>
 
-	isolcpus=	[KNL,SMP] Isolate a given set of CPUs from disturbance.
+	isolcpus=	[KNL,SMP,ISOL] Isolate a given set of CPUs from disturbance.
 			[Deprecated - use cpusets instead]
 			Format: [flag-list,]<cpu-list>
 
@@ -2662,7 +2666,7 @@
 			Valid arguments: on, off
 			Default: on
 
-	nohz_full=	[KNL,BOOT]
+	nohz_full=	[KNL,BOOT,SMP,ISOL]
 			The argument is a cpu list, as described above.
 			In kernels built with CONFIG_NO_HZ_FULL=y, set
 			the specified list of CPUs whose tick will be stopped
@@ -2708,6 +2712,8 @@
 			steal time is computed, but won't influence scheduler
 			behaviour
 
+	nopti		[X86-64] Disable kernel page table isolation
+
 	nolapic		[X86-32,APIC] Do not enable or use the local APIC.
 
 	nolapic_timer	[X86-32,APIC] Do not use the local APIC timer.
@@ -3282,6 +3288,12 @@
 	pt.		[PARIDE]
 			See Documentation/blockdev/paride.txt.
 
+	pti=		[X86_64]
+			Control user/kernel address space isolation:
+			on - enable
+			off - disable
+			auto - default setting
+
 	pty.legacy_count=
 			[KNL] Number of legacy pty's. Overwrites compiled-in
 			default number.
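
	(Illustrative sketch, not the kernel's actual PTI code.)  A boot
	parameter such as the "pti=" option documented above is typically
	consumed very early through an early_param() handler, roughly along
	these lines; all names below are made up for illustration:

		/* Hypothetical parser for "pti=on|off|auto"; records the
		 * requested mode before the page tables are finalised. */
		static enum { PTI_AUTO, PTI_FORCE_ON, PTI_FORCE_OFF } pti_mode = PTI_AUTO;

		static int __init pti_setup(char *str)
		{
			if (!str)
				return -EINVAL;
			if (!strcmp(str, "on"))
				pti_mode = PTI_FORCE_ON;
			else if (!strcmp(str, "off"))
				pti_mode = PTI_FORCE_OFF;
			else if (!strcmp(str, "auto"))
				pti_mode = PTI_AUTO;
			else
				return -EINVAL;
			return 0;
		}
		early_param("pti", pti_setup);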
diff --git a/Documentation/admin-guide/thunderbolt.rst b/Documentation/admin-guide/thunderbolt.rst
index de50a8561774249351515662404e2a1f8328aba6..9b55952039a692d8119ce62502af9a4dd071b8e6 100644
--- a/Documentation/admin-guide/thunderbolt.rst
+++ b/Documentation/admin-guide/thunderbolt.rst
@@ -230,7 +230,7 @@ If supported by your machine this will be exposed by the WMI bus with
 a sysfs attribute called "force_power".
 
 For example the intel-wmi-thunderbolt driver exposes this attribute in:
-  /sys/devices/platform/PNP0C14:00/wmi_bus/wmi_bus-PNP0C14:00/86CCFD48-205E-4A77-9C48-2021CBEDE341/force_power
+  /sys/bus/wmi/devices/86CCFD48-205E-4A77-9C48-2021CBEDE341/force_power
 
   To force the power to on, write 1 to this attribute file.
   To disable force power, write 0 to this attribute file.
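
	A minimal user-space sketch of the same operation (hypothetical
	helper, not part of the kernel tree; only the sysfs path above is
	taken from the document):

		#include <fcntl.h>
		#include <unistd.h>

		/* Write "1" (force power on) or "0" (release) to the
		 * force_power attribute exposed by intel-wmi-thunderbolt. */
		static int tb_force_power(int on)
		{
			const char *path = "/sys/bus/wmi/devices/"
				"86CCFD48-205E-4A77-9C48-2021CBEDE341/force_power";
			int fd = open(path, O_WRONLY);

			if (fd < 0)
				return -1;
			if (write(fd, on ? "1" : "0", 1) != 1) {
				close(fd);
				return -1;
			}
			return close(fd);
		}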
diff --git a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
index 376fa2f50e6bc9b41052928037acd4b3a382d380..956bb046e599d576e3f881b2901e0d369a3c9802 100644
--- a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
+++ b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
@@ -13,7 +13,6 @@ Required properties:
                  at25df321a
                  at25df641
                  at26df081a
-                 en25s64
                  mr25h128
                  mr25h256
                  mr25h10
@@ -33,7 +32,6 @@ Required properties:
                  s25fl008k
                  s25fl064k
                  sst25vf040b
-                 sst25wf040b
                  m25p40
                  m25p80
                  m25p16
diff --git a/Documentation/devicetree/bindings/sound/max98373.txt b/Documentation/devicetree/bindings/sound/max98373.txt
new file mode 100644
index 0000000000000000000000000000000000000000..456cb1c59353d2e5140eaf2a1d35540e485dcdcb
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/max98373.txt
@@ -0,0 +1,40 @@
+Maxim Integrated MAX98373 Speaker Amplifier
+
+This device supports I2C.
+
+Required properties:
+
+ - compatible : "maxim,max98373"
+
+ - reg : the I2C address of the device.
+
+Optional properties:
+
+  - maxim,vmon-slot-no : slot number used to send voltage information,
+                   or, in interleave mode, the slot used for the
+                   interleaved data.
+                   slot range : 0 ~ 15,  Default : 0
+
+  - maxim,imon-slot-no : slot number used to send current information
+                   slot range : 0 ~ 15,  Default : 0
+
+  - maxim,spkfb-slot-no : slot number used to send speaker feedback information
+                   slot range : 0 ~ 15,  Default : 0
+
+  - maxim,interleave-mode : For cases where a single combined channel
+                   for the I/V sense data is not sufficient, the device can
+                   also be configured to share a single data output channel
+                   on alternating frames, with the current and voltage data
+                   frame interleaved on a single output channel.
+                   Boolean, define to enable the interleave mode, Default : false
+
+Example:
+
+codec: max98373@31 {
+   compatible = "maxim,max98373";
+   reg = <0x31>;
+   maxim,vmon-slot-no = <0>;
+   maxim,imon-slot-no = <1>;
+   maxim,spkfb-slot-no = <2>;
+   maxim,interleave-mode;
+};
diff --git a/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt b/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt
index 77a57f84bed486204f0a7caa3780d6e18416544a..6df87b97f7cb27c0293e823f78ac41e5721ab877 100644
--- a/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt
+++ b/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt
@@ -2,153 +2,143 @@ Mediatek AFE PCM controller for mt2701
 
 Required properties:
 - compatible = "mediatek,mt2701-audio";
-- reg: register location and size
 - interrupts: should contain AFE and ASYS interrupts
 - interrupt-names: should be "afe" and "asys"
 - power-domains: should define the power domain
+- clocks: Must contain an entry for each entry in clock-names
+  See ../clocks/clock-bindings.txt for details
 - clock-names: should have these clock names:
 		"infra_sys_audio_clk",
 		"top_audio_mux1_sel",
 		"top_audio_mux2_sel",
-		"top_audio_mux1_div",
-		"top_audio_mux2_div",
-		"top_audio_48k_timing",
-		"top_audio_44k_timing",
-		"top_audpll_mux_sel",
-		"top_apll_sel",
-		"top_aud1_pll_98M",
-		"top_aud2_pll_90M",
-		"top_hadds2_pll_98M",
-		"top_hadds2_pll_294M",
-		"top_audpll",
-		"top_audpll_d4",
-		"top_audpll_d8",
-		"top_audpll_d16",
-		"top_audpll_d24",
-		"top_audintbus_sel",
-		"clk_26m",
-		"top_syspll1_d4",
-		"top_aud_k1_src_sel",
-		"top_aud_k2_src_sel",
-		"top_aud_k3_src_sel",
-		"top_aud_k4_src_sel",
-		"top_aud_k5_src_sel",
-		"top_aud_k6_src_sel",
-		"top_aud_k1_src_div",
-		"top_aud_k2_src_div",
-		"top_aud_k3_src_div",
-		"top_aud_k4_src_div",
-		"top_aud_k5_src_div",
-		"top_aud_k6_src_div",
-		"top_aud_i2s1_mclk",
-		"top_aud_i2s2_mclk",
-		"top_aud_i2s3_mclk",
-		"top_aud_i2s4_mclk",
-		"top_aud_i2s5_mclk",
-		"top_aud_i2s6_mclk",
-		"top_asm_m_sel",
-		"top_asm_h_sel",
-		"top_univpll2_d4",
-		"top_univpll2_d2",
-		"top_syspll_d5";
+		"top_audio_a1sys_hp",
+		"top_audio_a2sys_hp",
+		"i2s0_src_sel",
+		"i2s1_src_sel",
+		"i2s2_src_sel",
+		"i2s3_src_sel",
+		"i2s0_src_div",
+		"i2s1_src_div",
+		"i2s2_src_div",
+		"i2s3_src_div",
+		"i2s0_mclk_en",
+		"i2s1_mclk_en",
+		"i2s2_mclk_en",
+		"i2s3_mclk_en",
+		"i2so0_hop_ck",
+		"i2so1_hop_ck",
+		"i2so2_hop_ck",
+		"i2so3_hop_ck",
+		"i2si0_hop_ck",
+		"i2si1_hop_ck",
+		"i2si2_hop_ck",
+		"i2si3_hop_ck",
+		"asrc0_out_ck",
+		"asrc1_out_ck",
+		"asrc2_out_ck",
+		"asrc3_out_ck",
+		"audio_afe_pd",
+		"audio_afe_conn_pd",
+		"audio_a1sys_pd",
+		"audio_a2sys_pd",
+		"audio_mrgif_pd";
+- assigned-clocks: list of input clocks and dividers for the audio system.
+		   See ../clocks/clock-bindings.txt for details.
+- assigned-clock-parents: parent of input clocks of assigned clocks.
+- assigned-clock-rates: list of clock frequencies of assigned clocks.
+
+Must be a subnode of the MediaTek audsys device tree node.
+See ../arm/mediatek/mediatek,audsys.txt for details about the parent node.
 
 Example:
 
-	afe: mt2701-afe-pcm@11220000 {
-		compatible = "mediatek,mt2701-audio";
-		reg = <0 0x11220000 0 0x2000>,
-		      <0 0x112A0000 0 0x20000>;
-		interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>,
-			     <GIC_SPI 132 IRQ_TYPE_LEVEL_LOW>;
-		interrupt-names	= "afe", "asys";
-		power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>;
-		clocks = <&infracfg CLK_INFRA_AUDIO>,
-			 <&topckgen CLK_TOP_AUD_MUX1_SEL>,
-			 <&topckgen CLK_TOP_AUD_MUX2_SEL>,
-			 <&topckgen CLK_TOP_AUD_MUX1_DIV>,
-			 <&topckgen CLK_TOP_AUD_MUX2_DIV>,
-			 <&topckgen CLK_TOP_AUD_48K_TIMING>,
-			 <&topckgen CLK_TOP_AUD_44K_TIMING>,
-			 <&topckgen CLK_TOP_AUDPLL_MUX_SEL>,
-			 <&topckgen CLK_TOP_APLL_SEL>,
-			 <&topckgen CLK_TOP_AUD1PLL_98M>,
-			 <&topckgen CLK_TOP_AUD2PLL_90M>,
-			 <&topckgen CLK_TOP_HADDS2PLL_98M>,
-			 <&topckgen CLK_TOP_HADDS2PLL_294M>,
-			 <&topckgen CLK_TOP_AUDPLL>,
-			 <&topckgen CLK_TOP_AUDPLL_D4>,
-			 <&topckgen CLK_TOP_AUDPLL_D8>,
-			 <&topckgen CLK_TOP_AUDPLL_D16>,
-			 <&topckgen CLK_TOP_AUDPLL_D24>,
-			 <&topckgen CLK_TOP_AUDINTBUS_SEL>,
-			 <&clk26m>,
-			 <&topckgen CLK_TOP_SYSPLL1_D4>,
-			 <&topckgen CLK_TOP_AUD_K1_SRC_SEL>,
-			 <&topckgen CLK_TOP_AUD_K2_SRC_SEL>,
-			 <&topckgen CLK_TOP_AUD_K3_SRC_SEL>,
-			 <&topckgen CLK_TOP_AUD_K4_SRC_SEL>,
-			 <&topckgen CLK_TOP_AUD_K5_SRC_SEL>,
-			 <&topckgen CLK_TOP_AUD_K6_SRC_SEL>,
-			 <&topckgen CLK_TOP_AUD_K1_SRC_DIV>,
-			 <&topckgen CLK_TOP_AUD_K2_SRC_DIV>,
-			 <&topckgen CLK_TOP_AUD_K3_SRC_DIV>,
-			 <&topckgen CLK_TOP_AUD_K4_SRC_DIV>,
-			 <&topckgen CLK_TOP_AUD_K5_SRC_DIV>,
-			 <&topckgen CLK_TOP_AUD_K6_SRC_DIV>,
-			 <&topckgen CLK_TOP_AUD_I2S1_MCLK>,
-			 <&topckgen CLK_TOP_AUD_I2S2_MCLK>,
-			 <&topckgen CLK_TOP_AUD_I2S3_MCLK>,
-			 <&topckgen CLK_TOP_AUD_I2S4_MCLK>,
-			 <&topckgen CLK_TOP_AUD_I2S5_MCLK>,
-			 <&topckgen CLK_TOP_AUD_I2S6_MCLK>,
-			 <&topckgen CLK_TOP_ASM_M_SEL>,
-			 <&topckgen CLK_TOP_ASM_H_SEL>,
-			 <&topckgen CLK_TOP_UNIVPLL2_D4>,
-			 <&topckgen CLK_TOP_UNIVPLL2_D2>,
-			 <&topckgen CLK_TOP_SYSPLL_D5>;
+	audsys: audio-subsystem@11220000 {
+		compatible = "mediatek,mt2701-audsys", "syscon", "simple-mfd";
+		...
+
+		afe: audio-controller {
+			compatible = "mediatek,mt2701-audio";
+			interrupts =  <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>,
+				      <GIC_SPI 132 IRQ_TYPE_LEVEL_LOW>;
+			interrupt-names	= "afe", "asys";
+			power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>;
+
+			clocks = <&infracfg CLK_INFRA_AUDIO>,
+				 <&topckgen CLK_TOP_AUD_MUX1_SEL>,
+				 <&topckgen CLK_TOP_AUD_MUX2_SEL>,
+				 <&topckgen CLK_TOP_AUD_48K_TIMING>,
+				 <&topckgen CLK_TOP_AUD_44K_TIMING>,
+				 <&topckgen CLK_TOP_AUD_K1_SRC_SEL>,
+				 <&topckgen CLK_TOP_AUD_K2_SRC_SEL>,
+				 <&topckgen CLK_TOP_AUD_K3_SRC_SEL>,
+				 <&topckgen CLK_TOP_AUD_K4_SRC_SEL>,
+				 <&topckgen CLK_TOP_AUD_K1_SRC_DIV>,
+				 <&topckgen CLK_TOP_AUD_K2_SRC_DIV>,
+				 <&topckgen CLK_TOP_AUD_K3_SRC_DIV>,
+				 <&topckgen CLK_TOP_AUD_K4_SRC_DIV>,
+				 <&topckgen CLK_TOP_AUD_I2S1_MCLK>,
+				 <&topckgen CLK_TOP_AUD_I2S2_MCLK>,
+				 <&topckgen CLK_TOP_AUD_I2S3_MCLK>,
+				 <&topckgen CLK_TOP_AUD_I2S4_MCLK>,
+				 <&audsys CLK_AUD_I2SO1>,
+				 <&audsys CLK_AUD_I2SO2>,
+				 <&audsys CLK_AUD_I2SO3>,
+				 <&audsys CLK_AUD_I2SO4>,
+				 <&audsys CLK_AUD_I2SIN1>,
+				 <&audsys CLK_AUD_I2SIN2>,
+				 <&audsys CLK_AUD_I2SIN3>,
+				 <&audsys CLK_AUD_I2SIN4>,
+				 <&audsys CLK_AUD_ASRCO1>,
+				 <&audsys CLK_AUD_ASRCO2>,
+				 <&audsys CLK_AUD_ASRCO3>,
+				 <&audsys CLK_AUD_ASRCO4>,
+				 <&audsys CLK_AUD_AFE>,
+				 <&audsys CLK_AUD_AFE_CONN>,
+				 <&audsys CLK_AUD_A1SYS>,
+				 <&audsys CLK_AUD_A2SYS>,
+				 <&audsys CLK_AUD_AFE_MRGIF>;
+
+			clock-names = "infra_sys_audio_clk",
+				      "top_audio_mux1_sel",
+				      "top_audio_mux2_sel",
+				      "top_audio_a1sys_hp",
+				      "top_audio_a2sys_hp",
+				      "i2s0_src_sel",
+				      "i2s1_src_sel",
+				      "i2s2_src_sel",
+				      "i2s3_src_sel",
+				      "i2s0_src_div",
+				      "i2s1_src_div",
+				      "i2s2_src_div",
+				      "i2s3_src_div",
+				      "i2s0_mclk_en",
+				      "i2s1_mclk_en",
+				      "i2s2_mclk_en",
+				      "i2s3_mclk_en",
+				      "i2so0_hop_ck",
+				      "i2so1_hop_ck",
+				      "i2so2_hop_ck",
+				      "i2so3_hop_ck",
+				      "i2si0_hop_ck",
+				      "i2si1_hop_ck",
+				      "i2si2_hop_ck",
+				      "i2si3_hop_ck",
+				      "asrc0_out_ck",
+				      "asrc1_out_ck",
+				      "asrc2_out_ck",
+				      "asrc3_out_ck",
+				      "audio_afe_pd",
+				      "audio_afe_conn_pd",
+				      "audio_a1sys_pd",
+				      "audio_a2sys_pd",
+				      "audio_mrgif_pd";
 
-		clock-names = "infra_sys_audio_clk",
-			      "top_audio_mux1_sel",
-			      "top_audio_mux2_sel",
-			      "top_audio_mux1_div",
-			      "top_audio_mux2_div",
-			      "top_audio_48k_timing",
-			      "top_audio_44k_timing",
-			      "top_audpll_mux_sel",
-			      "top_apll_sel",
-			      "top_aud1_pll_98M",
-			      "top_aud2_pll_90M",
-			      "top_hadds2_pll_98M",
-			      "top_hadds2_pll_294M",
-			      "top_audpll",
-			      "top_audpll_d4",
-			      "top_audpll_d8",
-			      "top_audpll_d16",
-			      "top_audpll_d24",
-			      "top_audintbus_sel",
-			      "clk_26m",
-			      "top_syspll1_d4",
-			      "top_aud_k1_src_sel",
-			      "top_aud_k2_src_sel",
-			      "top_aud_k3_src_sel",
-			      "top_aud_k4_src_sel",
-			      "top_aud_k5_src_sel",
-			      "top_aud_k6_src_sel",
-			      "top_aud_k1_src_div",
-			      "top_aud_k2_src_div",
-			      "top_aud_k3_src_div",
-			      "top_aud_k4_src_div",
-			      "top_aud_k5_src_div",
-			      "top_aud_k6_src_div",
-			      "top_aud_i2s1_mclk",
-			      "top_aud_i2s2_mclk",
-			      "top_aud_i2s3_mclk",
-			      "top_aud_i2s4_mclk",
-			      "top_aud_i2s5_mclk",
-			      "top_aud_i2s6_mclk",
-			      "top_asm_m_sel",
-			      "top_asm_h_sel",
-			      "top_univpll2_d4",
-			      "top_univpll2_d2",
-			      "top_syspll_d5";
+			assigned-clocks = <&topckgen CLK_TOP_AUD_MUX1_SEL>,
+					  <&topckgen CLK_TOP_AUD_MUX2_SEL>,
+					  <&topckgen CLK_TOP_AUD_MUX1_DIV>,
+					  <&topckgen CLK_TOP_AUD_MUX2_DIV>;
+			assigned-clock-parents = <&topckgen CLK_TOP_AUD1PLL_98M>,
+						 <&topckgen CLK_TOP_AUD2PLL_90M>;
+			assigned-clock-rates = <0>, <0>, <49152000>, <45158400>;
+		};
 	};
diff --git a/Documentation/devicetree/bindings/sound/nau8825.txt b/Documentation/devicetree/bindings/sound/nau8825.txt
index 2f5e973285a648a3a7ea1bedb5d5cc9c79311589..d16d96839bcbf1a9aeb9c11fc5ffb79c25115c43 100644
--- a/Documentation/devicetree/bindings/sound/nau8825.txt
+++ b/Documentation/devicetree/bindings/sound/nau8825.txt
@@ -69,7 +69,7 @@ Optional properties:
   - nuvoton,jack-insert-debounce: number from 0 to 7 that sets debounce time to 2^(n+2) ms
   - nuvoton,jack-eject-debounce: number from 0 to 7 that sets debounce time to 2^(n+2) ms
 
-  - nuvoton,crosstalk-bypass: make crosstalk function bypass if set.
+  - nuvoton,crosstalk-enable: enable the crosstalk function if set.
 
   - clocks: list of phandle and clock specifier pairs according to common clock bindings for the
       clocks described in clock-names
@@ -98,7 +98,7 @@ Example:
       nuvoton,short-key-debounce = <2>;
       nuvoton,jack-insert-debounce = <7>;
       nuvoton,jack-eject-debounce = <7>;
-      nuvoton,crosstalk-bypass;
+      nuvoton,crosstalk-enable;
 
       clock-names = "mclk";
       clocks = <&tegra_car TEGRA210_CLK_CLK_OUT_2>;
diff --git a/Documentation/devicetree/bindings/sound/pcm186x.txt b/Documentation/devicetree/bindings/sound/pcm186x.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1087f48559807edece39ee79fde95be9a0f94c6d
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/pcm186x.txt
@@ -0,0 +1,42 @@
+Texas Instruments PCM186x Universal Audio ADC
+
+These devices support both I2C and SPI (configured with pin strapping
+on the board).
+
+Required properties:
+
+ - compatible : "ti,pcm1862",
+                "ti,pcm1863",
+                "ti,pcm1864",
+                "ti,pcm1865"
+
+ - reg : The I2C address of the device for I2C, the chip select
+         number for SPI.
+
+ - avdd-supply: Analog core power supply (3.3v)
+ - dvdd-supply: Digital core power supply
+ - iovdd-supply: Digital IO power supply
+        See regulator/regulator.txt for more information
+
+CODEC input pins:
+ * VINL1
+ * VINR1
+ * VINL2
+ * VINR2
+ * VINL3
+ * VINR3
+ * VINL4
+ * VINR4
+
+These pins can be referenced in the sound node's audio-routing property.
+
+Example:
+
+	pcm186x: audio-codec@4a {
+		compatible = "ti,pcm1865";
+		reg = <0x4a>;
+
+		avdd-supply = <&reg_3v3_analog>;
+		dvdd-supply = <&reg_3v3>;
+		iovdd-supply = <&reg_1v8>;
+	};
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index 085bec364caf6592539535d13a6e50a05515c888..5bed9a5957729453c3196a88781802bf41d853a5 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -4,7 +4,7 @@ Renesas R-Car sound
 * Modules
 =============================================
 
-Renesas R-Car sound is constructed from below modules
+Renesas R-Car and RZ/G sound is constructed from the modules below
 (for Gen2 or later)
 
  SCU		: Sampling Rate Converter Unit
@@ -197,12 +197,17 @@ Ex)
 	[MEM] -> [SRC2] -> [CTU03] -+
 
 	sound {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
 		compatible = "simple-scu-audio-card";
 		...
-		simple-audio-card,cpu-0 {
+		simple-audio-card,cpu@0 {
+			reg = <0>;
 			sound-dai = <&rcar_sound 0>;
 		};
-		simple-audio-card,cpu-1 {
+		simple-audio-card,cpu@1 {
+			reg = <1>;
 			sound-dai = <&rcar_sound 1>;
 		};
 		simple-audio-card,codec {
@@ -334,9 +339,11 @@ Required properties:
 
 - compatible			: "renesas,rcar_sound-<soctype>", fallbacks
 				  "renesas,rcar_sound-gen1" if generation1, and
-				  "renesas,rcar_sound-gen2" if generation2
+				  "renesas,rcar_sound-gen2" if generation2 (or RZ/G1)
 				  "renesas,rcar_sound-gen3" if generation3
 				  Examples with soctypes are:
+				    - "renesas,rcar_sound-r8a7743" (RZ/G1M)
+				    - "renesas,rcar_sound-r8a7745" (RZ/G1E)
 				    - "renesas,rcar_sound-r8a7778" (R-Car M1A)
 				    - "renesas,rcar_sound-r8a7779" (R-Car H1)
 				    - "renesas,rcar_sound-r8a7790" (R-Car H2)
diff --git a/Documentation/devicetree/bindings/sound/simple-card.txt b/Documentation/devicetree/bindings/sound/simple-card.txt
index 166f2290233b2f55f11dabe42ea9362e0697e30d..17c13e74667da8d2135fa3360926909b6dbc569b 100644
--- a/Documentation/devicetree/bindings/sound/simple-card.txt
+++ b/Documentation/devicetree/bindings/sound/simple-card.txt
@@ -140,6 +140,7 @@ sound {
 	simple-audio-card,name = "Cubox Audio";
 
 	simple-audio-card,dai-link@0 {		/* I2S - HDMI */
+		reg = <0>;
 		format = "i2s";
 		cpu {
 			sound-dai = <&audio1 0>;
@@ -150,6 +151,7 @@ sound {
 	};
 
 	simple-audio-card,dai-link@1 {		/* S/PDIF - HDMI */
+		reg = <1>;
 		cpu {
 			sound-dai = <&audio1 1>;
 		};
@@ -159,6 +161,7 @@ sound {
 	};
 
 	simple-audio-card,dai-link@2 {		/* S/PDIF - S/PDIF */
+		reg = <2>;
 		cpu {
 			sound-dai = <&audio1 1>;
 		};
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
index 1f9cd7095337881b5ee97ef51e9143efcbc0574e..b1acc1a256baacb81f46d02286b9a5664d088333 100644
--- a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
+++ b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
@@ -20,11 +20,6 @@ Required properties:
 
 Optional properties:
   - resets: Reference to a reset controller asserting the SAI
-  - st,sync: specify synchronization mode.
-	By default SAI sub-block is in asynchronous mode.
-	This property sets SAI sub-block as slave of another SAI sub-block.
-	Must contain the phandle and index of the sai sub-block providing
-	the synchronization.
 
 SAI subnodes:
 Two subnodes corresponding to SAI sub-block instances A et B can be defined.
@@ -44,6 +39,13 @@ SAI subnodes required properties:
   - pinctrl-names: should contain only value "default"
   - pinctrl-0: see Documentation/devicetree/bindings/pinctrl/pinctrl-stm32.txt
 
+SAI subnodes optional properties:
+  - st,sync: specify synchronization mode.
+	By default the SAI sub-block is in asynchronous mode.
+	This property sets the SAI sub-block as slave of another SAI sub-block.
+	Must contain the phandle and index of the SAI sub-block providing
+	the synchronization.
+
 The device node should contain one 'port' child node with one child 'endpoint'
 node, according to the bindings defined in Documentation/devicetree/bindings/
 graph.txt.
diff --git a/Documentation/devicetree/bindings/sound/sun4i-i2s.txt b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
index 05d7135a8d2ffa7bb3732dd013f0b47c4dcfa9a8..b9d50d6cdef30cb402f354c11958f3c77a759a22 100644
--- a/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
@@ -8,6 +8,7 @@ Required properties:
 - compatible: should be one of the following:
    - "allwinner,sun4i-a10-i2s"
    - "allwinner,sun6i-a31-i2s"
+   - "allwinner,sun8i-a83t-i2s"
    - "allwinner,sun8i-h3-i2s"
 - reg: physical base address of the controller and length of memory mapped
   region.
@@ -23,6 +24,7 @@ Required properties:
 
 Required properties for the following compatibles:
 	- "allwinner,sun6i-a31-i2s"
+	- "allwinner,sun8i-a83t-i2s"
 	- "allwinner,sun8i-h3-i2s"
 - resets: phandle to the reset line for this codec
 
diff --git a/Documentation/devicetree/bindings/sound/tas5720.txt b/Documentation/devicetree/bindings/sound/tas5720.txt
index 40d94f82beb364c1056e75d9f8e089368f63951a..7481653fe8e3eba498667dcebe9afb8d97dc3391 100644
--- a/Documentation/devicetree/bindings/sound/tas5720.txt
+++ b/Documentation/devicetree/bindings/sound/tas5720.txt
@@ -6,10 +6,12 @@ audio playback. For more product information please see the links below:
 
 http://www.ti.com/product/TAS5720L
 http://www.ti.com/product/TAS5720M
+http://www.ti.com/product/TAS5722L
 
 Required properties:
 
-- compatible : "ti,tas5720"
+- compatible : "ti,tas5720",
+               "ti,tas5722"
 - reg : I2C slave address
 - dvdd-supply : phandle to a 3.3-V supply for the digital circuitry
 - pvdd-supply : phandle to a supply used for the Class-D amp and the analog
diff --git a/Documentation/devicetree/bindings/sound/tfa9879.txt b/Documentation/devicetree/bindings/sound/tfa9879.txt
index 23ba522d9e2b2cf930a61237fb6e4ac27b103658..1620e6848436d630aadc5ff17cd028925623b89f 100644
--- a/Documentation/devicetree/bindings/sound/tfa9879.txt
+++ b/Documentation/devicetree/bindings/sound/tfa9879.txt
@@ -6,18 +6,18 @@ Required properties:
 
 - reg : the I2C address of the device
 
+- #sound-dai-cells : must be 0.
+
 Example:
 
 &i2c1 {
-	clock-frequency = <100000>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_i2c1>;
-	status = "okay";
 
-	codec: tfa9879@6c {
+	amp: amp@6c {
 		#sound-dai-cells = <0>;
 		compatible = "nxp,tfa9879";
 		reg = <0x6c>;
-        };
+	};
 };
 
diff --git a/Documentation/devicetree/bindings/sound/ti,tas6424.txt b/Documentation/devicetree/bindings/sound/ti,tas6424.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1c4ada0eef4e1613d2a88f79e3d572c65e64b1e9
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ti,tas6424.txt
@@ -0,0 +1,20 @@
+Texas Instruments TAS6424 Quad-Channel Audio amplifier
+
+The TAS6424 serial control bus communicates through the I2C protocol.
+
+Required properties:
+	- compatible: "ti,tas6424" - TAS6424
+	- reg: I2C slave address
+	- #sound-dai-cells: must be equal to 0
+
+Example:
+
+tas6424: tas6424@6a {
+	compatible = "ti,tas6424";
+	reg = <0x6a>;
+
+	#sound-dai-cells = <0>;
+};
+
+For more product information please see the link below:
+http://www.ti.com/product/TAS6424-Q1
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
index 6fbba562eaa7b36a46593160a0512dcc79e58a74..5b3c33bb99e57a4d21384aa7c1d36b062f14ab12 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
@@ -22,7 +22,7 @@ Required properties:
 
 Optional properties:
 
-- gpio-reset - gpio pin number used for codec reset
+- reset-gpios - GPIO specification for the active low RESET input.
 - ai31xx-micbias-vg - MicBias Voltage setting
         1 or MICBIAS_2_0V - MICBIAS output is powered to 2.0V
         2 or MICBIAS_2_5V - MICBIAS output is powered to 2.5V
@@ -30,6 +30,10 @@ Optional properties:
 	If this node is not mentioned or if the value is unknown, then
 	micbias	is set to 2.0V.
 
+Deprecated properties:
+
+- gpio-reset - gpio pin number used for codec reset
+
 CODEC output pins:
   * HPL
   * HPR
@@ -48,6 +52,7 @@ CODEC input pins:
 The pins can be used in referring sound node's audio-routing property.
 
 Example:
+#include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/sound/tlv320aic31xx-micbias.h>
 
 tlv320aic31xx: tlv320aic31xx@18 {
@@ -56,6 +61,8 @@ tlv320aic31xx: tlv320aic31xx@18 {
 
 	ai31xx-micbias-vg = <MICBIAS_OFF>;
 
+	reset-gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
+
 	HPVDD-supply = <&regulator>;
 	SPRVDD-supply = <&regulator>;
 	SPLVDD-supply = <&regulator>;
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic3x.txt b/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
index ba5b45c483f5da6c3b1ced1d3d43416fc0f1851e..9796c46392620074c0ee32480e746b529e524b0e 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
@@ -17,7 +17,7 @@ Required properties:
 
 Optional properties:
 
-- gpio-reset - gpio pin number used for codec reset
+- reset-gpios - GPIO specification for the active low RESET input.
 - ai3x-gpio-func - <array of 2 int> - AIC3X_GPIO1 & AIC3X_GPIO2 Functionality
 				    - Not supported on tlv320aic3104
 - ai3x-micbias-vg - MicBias Voltage required.
@@ -34,6 +34,10 @@ Optional properties:
 - AVDD-supply, IOVDD-supply, DRVDD-supply, DVDD-supply : power supplies for the
   device as covered in Documentation/devicetree/bindings/regulator/regulator.txt
 
+Deprecated properties:
+
+- gpio-reset - gpio pin number used for codec reset
+
 CODEC output pins:
   * LLOUT
   * RLOUT
@@ -61,10 +65,14 @@ The pins can be used in referring sound node's audio-routing property.
 
 Example:
 
+#include <dt-bindings/gpio/gpio.h>
+
 tlv320aic3x: tlv320aic3x@1b {
 	compatible = "ti,tlv320aic3x";
 	reg = <0x1b>;
 
+	reset-gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
+
 	AVDD-supply = <&regulator>;
 	IOVDD-supply = <&regulator>;
 	DRVDD-supply = <&regulator>;
diff --git a/Documentation/devicetree/bindings/sound/tscs42xx.txt b/Documentation/devicetree/bindings/sound/tscs42xx.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2ac2f099669761354469f9550700c00b61e7c91e
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/tscs42xx.txt
@@ -0,0 +1,16 @@
+TSCS42XX Audio CODEC
+
+Required Properties:
+
+	- compatible :	"tempo,tscs42A1" for analog mic
+			"tempo,tscs42A2" for digital mic
+
+	- reg : 	<0x71> for analog mic
+			<0x69> for digital mic
+
+Example:
+
+wookie: codec@69 {
+	compatible = "tempo,tscs42A2";
+	reg = <0x69>;
+};
diff --git a/Documentation/devicetree/bindings/sound/uniphier,evea.txt b/Documentation/devicetree/bindings/sound/uniphier,evea.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3f31b235f18b9bc0a3a82b0058fbde26c6181d6b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/uniphier,evea.txt
@@ -0,0 +1,26 @@
+Socionext EVEA - UniPhier SoC internal codec driver
+
+Required properties:
+- compatible      : should be "socionext,uniphier-evea".
+- reg             : offset and length of the register set for the device.
+- clock-names     : should include the following entries:
+                    "evea", "exiv"
+- clocks          : a list of phandles; should contain an entry for each
+                    entry in clock-names.
+- reset-names     : should include the following entries:
+                    "evea", "exiv", "adamv"
+- resets          : a list of phandles; should contain an entry for each
+                    entry in reset-names.
+- #sound-dai-cells: should be 1.
+
+Example:
+
+	codec {
+		compatible = "socionext,uniphier-evea";
+		reg = <0x57900000 0x1000>;
+		clock-names = "evea", "exiv";
+		clocks = <&sys_clk 41>, <&sys_clk 42>;
+		reset-names = "evea", "exiv", "adamv";
+		resets = <&sys_rst 41>, <&sys_rst 42>, <&adamv_rst 0>;
+		#sound-dai-cells = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
index 5bf13960f7f4a3c826c10b1e15a618df82d82403..e3c48b20b1a691b37d0b425251a257c682a38eca 100644
--- a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
+++ b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
@@ -12,24 +12,30 @@ Required properties:
   - "fsl,imx53-ecspi" for SPI compatible with the one integrated on i.MX53 and later Soc
 - reg : Offset and length of the register set for the device
 - interrupts : Should contain CSPI/eCSPI interrupt
-- cs-gpios : Specifies the gpio pins to be used for chipselects.
 - clocks : Clock specifiers for both ipg and per clocks.
 - clock-names : Clock names should include both "ipg" and "per"
 See the clock consumer binding,
 	Documentation/devicetree/bindings/clock/clock-bindings.txt
-- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
-		Documentation/devicetree/bindings/dma/dma.txt
-- dma-names: DMA request names should include "tx" and "rx" if present.
 
-Obsolete properties:
-- fsl,spi-num-chipselects : Contains the number of the chipselect
+Recommended properties:
+- cs-gpios : GPIOs to use as chip selects, see spi-bus.txt.  While the native chip
+select lines can be used, they appear to always generate a pulse between each
+word of a transfer.  Most use cases will require GPIO based chip selects to
+generate a valid transaction.
 
 Optional properties:
+- num-cs :  Number of total chip selects, see spi-bus.txt.
+- dmas: DMA specifiers for tx and rx dma. See the DMA client binding,
+Documentation/devicetree/bindings/dma/dma.txt.
+- dma-names: DMA request names, if present, should include "tx" and "rx".
 - fsl,spi-rdy-drctl: Integer, representing the value of DRCTL, the register
 controlling the SPI_READY handling. Note that to enable the DRCTL consideration,
 the SPI_READY mode-flag needs to be set too.
 Valid values are: 0 (disabled), 1 (edge-triggered burst) and 2 (level-triggered burst).
 
+Obsolete properties:
+- fsl,spi-num-chipselects : Contains the number of the chipselect
+
 Example:
 
 ecspi@70010000 {
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 0994bdd82cd37ce0abf9882745fd174764524ac5..f776fb804a8c211fffeda56bab019e8c02c1bd56 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -347,6 +347,7 @@ tcg	Trusted Computing Group
 tcl	Toby Churchill Ltd.
 technexion	TechNexion
 technologic	Technologic Systems
+tempo	Tempo Semiconductor
 terasic	Terasic Inc.
 thine	THine Electronics, Inc.
 ti	Texas Instruments
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index 3448e675b4623ce81b5e0bc1116c52a12c411801..ad41b3813f0a3a3bd5abb32532b42c9b820bbe27 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -1,6 +1,4 @@
 
-<previous description obsolete, deleted>
-
 Virtual memory map with 4 level page tables:
 
 0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm
@@ -14,13 +12,16 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
 ... unused hole ...
 ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB)
 ... unused hole ...
+fffffe0000000000 - fffffe7fffffffff (=39 bits) LDT remap for PTI
+fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ... unused hole ...
 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
 ... unused hole ...
 ffffffff80000000 - ffffffff9fffffff (=512 MB)  kernel text mapping, from phys 0
-ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space (variable)
-ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
+ffffffffa0000000 - [fixmap start]   (~1526 MB) module mapping space (variable)
+[fixmap start]   - ffffffffff5fffff kernel-internal fixmap range
+ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
 ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
 
 Virtual memory map with 5 level page tables:
@@ -29,26 +30,29 @@ Virtual memory map with 5 level page tables:
 hole caused by [56:63] sign extension
 ff00000000000000 - ff0fffffffffffff (=52 bits) guard hole, reserved for hypervisor
 ff10000000000000 - ff8fffffffffffff (=55 bits) direct mapping of all phys. memory
-ff90000000000000 - ff91ffffffffffff (=49 bits) hole
-ff92000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space
+ff90000000000000 - ff9fffffffffffff (=52 bits) LDT remap for PTI
+ffa0000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space (12800 TB)
 ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole
 ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
 ... unused hole ...
 ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
 ... unused hole ...
+fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ... unused hole ...
 ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
 ... unused hole ...
 ffffffff80000000 - ffffffff9fffffff (=512 MB)  kernel text mapping, from phys 0
-ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space
-ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
+ffffffffa0000000 - [fixmap start]   (~1526 MB) module mapping space
+[fixmap start]   - ffffffffff5fffff kernel-internal fixmap range
+ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI
 ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
 
 Architecture defines a 64-bit virtual address. Implementations can support
 less. Currently supported are 48- and 57-bit virtual addresses. Bits 63
-through to the most-significant implemented bit are set to either all ones
-or all zero. This causes hole between user space and kernel addresses.
+through to the most-significant implemented bit are sign extended.
+This causes a hole between user space and kernel addresses if you interpret
+them as unsigned.
 
 The direct mapping covers all memory in the system up to the highest
 memory address (this means in some cases it can also include PCI memory
@@ -58,9 +62,6 @@ vmalloc space is lazily synchronized into the different PML4/PML5 pages of
 the processes using the page fault handler, with init_top_pgt as
 reference.
 
-Current X86-64 implementations support up to 46 bits of address space (64 TB),
-which is our current limit. This expands into MBZ space in the page tables.
-
 We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual
 memory window (this size is arbitrary, it can be raised later if needed).
 The mappings are not part of any other kernel PGD and are only available
@@ -72,5 +73,3 @@ following fixmap section.
 Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
 physical memory, vmalloc/ioremap space and virtual memory map are randomized.
 Their order is preserved but their base will be offset early at boot time.
-
--Andi Kleen, Jul 2004
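
	The sign-extension rule above is what makes an address "canonical".
	A small stand-alone sketch (ordinary user-space C, not kernel code)
	showing how bits 63..48 mirror bit 47 in the 4-level case:

		#include <stdio.h>
		#include <stdint.h>

		/* Sign-extend a 48-bit virtual address to its canonical
		 * 64-bit form: copy bit 47 into bits 63..48. */
		static uint64_t canonical48(uint64_t vaddr)
		{
			if (vaddr & (1ULL << 47))
				vaddr |= 0xffffULL << 48;
			return vaddr;
		}

		int main(void)
		{
			/* 0x0000800000000000, the first address above the
			 * user range, becomes ffff800000000000 -- the bottom
			 * of the kernel half; the gap in between is the
			 * non-canonical hole. */
			printf("%016llx\n",
			       (unsigned long long)canonical48(1ULL << 47));
			return 0;
		}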
diff --git a/MAINTAINERS b/MAINTAINERS
index a6e86e20761e143ca976d4f8170e60b603bb5ed9..7e3178c488032efffc3964c182c3f15249aa692b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2621,24 +2621,22 @@ F:	fs/bfs/
 F:	include/uapi/linux/bfs_fs.h
 
 BLACKFIN ARCHITECTURE
-M:	Steven Miao <realmz6@gmail.com>
 L:	adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
 T:	git git://git.code.sf.net/p/adi-linux/code
 W:	http://blackfin.uclinux.org
-S:	Supported
+S:	Orphan
 F:	arch/blackfin/
 
 BLACKFIN EMAC DRIVER
 L:	adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:	http://blackfin.uclinux.org
-S:	Supported
+S:	Orphan
 F:	drivers/net/ethernet/adi/
 
 BLACKFIN MEDIA DRIVER
-M:	Scott Jiang <scott.jiang.linux@gmail.com>
 L:	adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:	http://blackfin.uclinux.org/
-S:	Supported
+S:	Orphan
 F:	drivers/media/platform/blackfin/
 F:	drivers/media/i2c/adv7183*
 F:	drivers/media/i2c/vs6624*
@@ -2646,25 +2644,25 @@ F:	drivers/media/i2c/vs6624*
 BLACKFIN RTC DRIVER
 L:	adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:	http://blackfin.uclinux.org
-S:	Supported
+S:	Orphan
 F:	drivers/rtc/rtc-bfin.c
 
 BLACKFIN SDH DRIVER
 L:	adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:	http://blackfin.uclinux.org
-S:	Supported
+S:	Orphan
 F:	drivers/mmc/host/bfin_sdh.c
 
 BLACKFIN SERIAL DRIVER
 L:	adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:	http://blackfin.uclinux.org
-S:	Supported
+S:	Orphan
 F:	drivers/tty/serial/bfin_uart.c
 
 BLACKFIN WATCHDOG DRIVER
 L:	adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:	http://blackfin.uclinux.org
-S:	Supported
+S:	Orphan
 F:	drivers/watchdog/bfin_wdt.c
 
 BLINKM RGB LED DRIVER
@@ -9806,6 +9804,7 @@ NXP TFA9879 DRIVER
 M:	Peter Rosin <peda@axentia.se>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Maintained
+F:	Documentation/devicetree/bindings/sound/tfa9879.txt
 F:	sound/soc/codecs/tfa9879*
 
 NXP-NCI NFC DRIVER
@@ -12594,6 +12593,12 @@ F:	include/media/soc*
 F:	drivers/media/i2c/soc_camera/
 F:	drivers/media/platform/soc_camera/
 
+SOCIONEXT UNIPHIER SOUND DRIVER
+M:	Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+S:	Maintained
+F:	sound/soc/uniphier/
+
 SOEKRIS NET48XX LED SUPPORT
 M:	Chris Boot <bootc@bootc.net>
 S:	Maintained
@@ -13493,6 +13498,7 @@ M:	Mika Westerberg <mika.westerberg@linux.intel.com>
 M:	Yehezkel Bernat <yehezkel.bernat@intel.com>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git
 S:	Maintained
+F:	Documentation/admin-guide/thunderbolt.rst
 F:	drivers/thunderbolt/
 F:	include/linux/thunderbolt.h
 
@@ -13851,6 +13857,13 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial.git
 S:	Maintained
 K:	^Subject:.*(?i)trivial
 
+TEMPO SEMICONDUCTOR DRIVERS
+M:	Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
+S:	Maintained
+F:	sound/soc/codecs/tscs*.c
+F:	sound/soc/codecs/tscs*.h
+F:	Documentation/devicetree/bindings/sound/tscs*.txt
+
 TTY LAYER
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 M:	Jiri Slaby <jslaby@suse.com>
diff --git a/Makefile b/Makefile
index 7e02f951b284187d5354c2b7bd39b0ef1bf5d903..eb1f5973813e0f0cdab86292e977d5d011d9bd55 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -789,6 +789,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS	+= $(call cc-option,-fno-strict-overflow)
 
+# Make sure -fstack-check isn't enabled (like Gentoo apparently did)
+KBUILD_CFLAGS  += $(call cc-option,-fno-stack-check,)
+
 # conserve stack if available
 KBUILD_CFLAGS   += $(call cc-option,-fconserve-stack)
 
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 1712f132b80d2402d94d72ea974a0c3326fa2f52..b83fdc06286a64ece150fb7e419bc587e47c3e34 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -85,7 +85,11 @@
 		.pushsection .text.fixup,"ax"
 		.align	4
 9001:		mov	r4, #-EFAULT
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+		ldr	r5, [sp, #9*4]		@ *err_ptr
+#else
 		ldr	r5, [sp, #8*4]		@ *err_ptr
+#endif
 		str	r4, [r5]
 		ldmia	sp, {r1, r2}		@ retrieve dst, len
 		add	r2, r2, r1
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index 321c9c05dd9e09fc0c745a4543a286b7628f00a4..f4363d40e2cd7fd62d40d826d5296c95f15cde9f 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -74,6 +74,9 @@ static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1)
 {
 	u64 reg;
 
+	/* Clear pmscr in case of early return */
+	*pmscr_el1 = 0;
+
 	/* SPE present on this CPU? */
 	if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
 						  ID_AA64DFR0_PMSVER_SHIFT))
diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c
index 9345b44b86f036572e33721eb80e9bbbe4493aa4..f57118e1f6b4265257799ae2cf8ea356077e20b9 100644
--- a/arch/parisc/boot/compressed/misc.c
+++ b/arch/parisc/boot/compressed/misc.c
@@ -123,8 +123,8 @@ int puts(const char *s)
 	while ((nuline = strchr(s, '\n')) != NULL) {
 		if (nuline != s)
 			pdc_iodc_print(s, nuline - s);
-			pdc_iodc_print("\r\n", 2);
-			s = nuline + 1;
+		pdc_iodc_print("\r\n", 2);
+		s = nuline + 1;
 	}
 	if (*s != '\0')
 		pdc_iodc_print(s, strlen(s));
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index c980a02a52bc0dda0a23b205f59d1d86438553f2..598c8d60fa5e602cc9303e1986ada9680d64feb3 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -35,7 +35,12 @@ struct thread_info {
 
 /* thread information allocation */
 
+#ifdef CONFIG_IRQSTACKS
+#define THREAD_SIZE_ORDER	2 /* PA-RISC requires at least 16k stack */
+#else
 #define THREAD_SIZE_ORDER	3 /* PA-RISC requires at least 32k stack */
+#endif
+
 /* Be sure to hunt all references to this down when you change the size of
  * the kernel stack */
 #define THREAD_SIZE             (PAGE_SIZE << THREAD_SIZE_ORDER)
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index a4fd296c958e8e14f13a913aca50510b11eb49b7..f3cecf5117cf8ab14724f0ea3535220c3224d569 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -878,9 +878,6 @@ ENTRY_CFI(syscall_exit_rfi)
 	STREG   %r19,PT_SR7(%r16)
 
 intr_return:
-	/* NOTE: Need to enable interrupts incase we schedule. */
-	ssm     PSW_SM_I, %r0
-
 	/* check for reschedule */
 	mfctl   %cr30,%r1
 	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
@@ -907,6 +904,11 @@ intr_check_sig:
 	LDREG	PT_IASQ1(%r16), %r20
 	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
 
+	/* NOTE: We need to enable interrupts if we have to deliver
+	 * signals. We used to do this earlier but it caused kernel
+	 * stack overflows. */
+	ssm     PSW_SM_I, %r0
+
 	copy	%r0, %r25			/* long in_syscall = 0 */
 #ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29			/* Reference param save area */
@@ -958,6 +960,10 @@ intr_do_resched:
 	cmpib,COND(=)	0, %r20, intr_do_preempt
 	nop
 
+	/* NOTE: We need to enable interrupts if we schedule.  We used
+	 * to do this earlier but it caused kernel stack overflows. */
+	ssm     PSW_SM_I, %r0
+
 #ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29		/* Reference param save area */
 #endif
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index e3a8e5e4d5de75897adcea4134f87c7246f60646..8d072c44f300c16d45ba8f4ee0c2eee6435e4ddd 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -305,6 +305,7 @@ ENDPROC_CFI(os_hpmc)
 
 
 	__INITRODATA
+	.align 4
 	.export os_hpmc_size
 os_hpmc_size:
 	.word .os_hpmc_end-.os_hpmc
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 5a657986ebbf4bef7beff4e8c8d20f1343872347..143f90e2f9f3c631616d4af52f0fe3fa08f44af9 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -15,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/kallsyms.h>
 #include <linux/sort.h>
-#include <linux/sched.h>
 
 #include <linux/uaccess.h>
 #include <asm/assembly.h>
diff --git a/arch/parisc/lib/delay.c b/arch/parisc/lib/delay.c
index 7eab4bb8abe630b14c54c3b457285b4228607dc6..66e506520505d8a3245d49d492831df5e3bbb42a 100644
--- a/arch/parisc/lib/delay.c
+++ b/arch/parisc/lib/delay.c
@@ -16,9 +16,7 @@
 #include <linux/preempt.h>
 #include <linux/init.h>
 
-#include <asm/processor.h>
 #include <asm/delay.h>
-
 #include <asm/special_insns.h>    /* for mfctl() */
 #include <asm/processor.h> /* for boot_cpu_data */
 
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 6177d43f0ce8afa9c1f6a1101e92ba161e47d97a..e2a2b8400490049143edee40316313a906ca6db7 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -160,9 +160,10 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
 #endif
 }
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-				 struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+				struct mm_struct *mm)
 {
+	return 0;
 }
 
 #ifndef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 5acb5a176dbe5c8bffe6ddb7458b7d3ac2b7019f..72be0c32e902a35fa45e5ed02036df91999dda58 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1403,7 +1403,7 @@ void show_regs(struct pt_regs * regs)
 
 	printk("NIP:  "REG" LR: "REG" CTR: "REG"\n",
 	       regs->nip, regs->link, regs->ctr);
-	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
+	printk("REGS: %px TRAP: %04lx   %s  (%s)\n",
 	       regs, regs->trap, print_tainted(), init_utsname()->release);
 	printk("MSR:  "REG" ", regs->msr);
 	print_msr_bits(regs->msr);
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index bf457843e03217b9aa02815d7791f0fce72aea2b..0d750d274c4e21a3324eb3505bbd73c86a58cdc9 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -725,7 +725,8 @@ u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
 
 	/* Return the per-cpu state for state saving/migration */
 	return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
-	       (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT;
+	       (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
+	       (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
 }
 
 int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
@@ -1558,7 +1559,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
 
 	/*
 	 * Restore P and Q. If the interrupt was pending, we
-	 * force both P and Q, which will trigger a resend.
+	 * force Q and !P, which will trigger a resend.
 	 *
 	 * That means that a guest that had both an interrupt
 	 * pending (queued) and Q set will restore with only
@@ -1566,7 +1567,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
 	 * is perfectly fine as coalescing interrupts that haven't
 	 * been presented yet is always allowed.
 	 */
-	if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
+	if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
 		state->old_p = true;
 	if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
 		state->old_q = true;
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 46d74e81aff1b4caad4769e7686fa0a800695cd4..d183b4801bdbded832b90d2aa1a18e713f70695b 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -763,7 +763,8 @@ emit_clear:
 			func = (u8 *) __bpf_call_base + imm;
 
 			/* Save skb pointer if we need to re-cache skb data */
-			if (bpf_helper_changes_pkt_data(func))
+			if ((ctx->seen & SEEN_SKB) &&
+			    bpf_helper_changes_pkt_data(func))
 				PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));
 
 			bpf_jit_emit_func_call(image, ctx, (u64)func);
@@ -772,7 +773,8 @@ emit_clear:
 			PPC_MR(b2p[BPF_REG_0], 3);
 
 			/* refresh skb cache */
-			if (bpf_helper_changes_pkt_data(func)) {
+			if ((ctx->seen & SEEN_SKB) &&
+			    bpf_helper_changes_pkt_data(func)) {
 				/* reload skb pointer to r3 */
 				PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
 				bpf_jit_emit_skb_loads(image, ctx);
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 1538129663658381b6b1a425dcbf582b1ed09531..fce545774d50afc6093c28ad2f4127c24ed5331c 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -410,8 +410,12 @@ static __u64 power_pmu_bhrb_to(u64 addr)
 	int ret;
 	__u64 target;
 
-	if (is_kernel_addr(addr))
-		return branch_target((unsigned int *)addr);
+	if (is_kernel_addr(addr)) {
+		if (probe_kernel_read(&instr, (void *)addr, sizeof(instr)))
+			return 0;
+
+		return branch_target(&instr);
+	}
 
 	/* Userspace: need copy instruction here then translate it */
 	pagefault_disable();
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 0ead3cd73caa2f8816e8c04f47cca691efba0560..be4e7f84f70a59db60e92a9bfe845678f71cc608 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -309,6 +309,19 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu)
 	if (!cpumask_test_and_clear_cpu(cpu, &nest_imc_cpumask))
 		return 0;
 
+	/*
+	 * Check whether nest_imc is registered. We could end up here if the
+	 * cpuhotplug callback registration fails, i.e., the callback invokes the
+	 * offline path for all successfully registered nodes. At this stage,
+	 * nest_imc pmu will not be registered and we should return here.
+	 *
+	 * We return with a zero since this is not an offline failure. And
+	 * cpuhp_setup_state() returns the actual failure reason to the caller,
+	 * which in turn will call the cleanup routine.
+	 */
+	if (!nest_pmus)
+		return 0;
+
 	/*
 	 * Now that this cpu is one of the designated,
 	 * find a next cpu a) which is online and b) in same chip.
@@ -1171,6 +1184,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
 		if (nest_pmus == 1) {
 			cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
 			kfree(nest_imc_refc);
+			kfree(per_nest_pmu_arr);
 		}
 
 		if (nest_pmus > 0)
@@ -1195,7 +1209,6 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
 		kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
 	kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
 	kfree(pmu_ptr);
-	kfree(per_nest_pmu_arr);
 	return;
 }
 
@@ -1309,6 +1322,8 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
 			ret = nest_pmu_cpumask_init();
 			if (ret) {
 				mutex_unlock(&nest_init_lock);
+				kfree(nest_imc_refc);
+				kfree(per_nest_pmu_arr);
 				goto err_free;
 			}
 		}
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 44cbf4c12ea137562f02758aab4e2e604f54e96f..df95102e732cb466911b32632fd53e3f3369bf54 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -354,6 +354,7 @@ static int fsl_of_msi_remove(struct platform_device *ofdev)
 }
 
 static struct lock_class_key fsl_msi_irq_class;
+static struct lock_class_key fsl_msi_irq_request_class;
 
 static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
 			       int offset, int irq_index)
@@ -373,7 +374,8 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
 		dev_err(&dev->dev, "No memory for MSI cascade data\n");
 		return -ENOMEM;
 	}
-	irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class);
+	irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class,
+			      &fsl_msi_irq_request_class);
 	cascade_data->index = offset;
 	cascade_data->msi_data = msi;
 	cascade_data->virq = virt_msir;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index e81c16838b90f1bc9a5418bc1b4e5365e9cb0aef..9557d8b516df5a689dda995cd7fd501ddd6cf54c 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -55,8 +55,7 @@ struct bpf_jit {
 #define SEEN_LITERAL	8	/* code uses literals */
 #define SEEN_FUNC	16	/* calls C functions */
 #define SEEN_TAIL_CALL	32	/* code uses tail calls */
-#define SEEN_SKB_CHANGE	64	/* code changes skb data */
-#define SEEN_REG_AX	128	/* code uses constant blinding */
+#define SEEN_REG_AX	64	/* code uses constant blinding */
 #define SEEN_STACK	(SEEN_FUNC | SEEN_MEM | SEEN_SKB)
 
 /*
@@ -448,12 +447,12 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
 			EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
 				      REG_15, 152);
 	}
-	if (jit->seen & SEEN_SKB)
+	if (jit->seen & SEEN_SKB) {
 		emit_load_skb_data_hlen(jit);
-	if (jit->seen & SEEN_SKB_CHANGE)
 		/* stg %b1,ST_OFF_SKBP(%r0,%r15) */
 		EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
 			      STK_OFF_SKBP);
+	}
 }
 
 /*
@@ -983,8 +982,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 		EMIT2(0x0d00, REG_14, REG_W1);
 		/* lgr %b0,%r2: load return value into %b0 */
 		EMIT4(0xb9040000, BPF_REG_0, REG_2);
-		if (bpf_helper_changes_pkt_data((void *)func)) {
-			jit->seen |= SEEN_SKB_CHANGE;
+		if ((jit->seen & SEEN_SKB) &&
+		    bpf_helper_changes_pkt_data((void *)func)) {
 			/* lg %b1,ST_OFF_SKBP(%r15) */
 			EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
 				      REG_15, STK_OFF_SKBP);
diff --git a/arch/sparc/lib/hweight.S b/arch/sparc/lib/hweight.S
index e5547b22cd1832c3aea507b3dfc694da90568222..0ddbbb03182232fe199f5222248617c72a2f23b7 100644
--- a/arch/sparc/lib/hweight.S
+++ b/arch/sparc/lib/hweight.S
@@ -44,8 +44,8 @@ EXPORT_SYMBOL(__arch_hweight32)
 	.previous
 
 ENTRY(__arch_hweight64)
-	sethi	%hi(__sw_hweight16), %g1
-	jmpl	%g1 + %lo(__sw_hweight16), %g0
+	sethi	%hi(__sw_hweight64), %g1
+	jmpl	%g1 + %lo(__sw_hweight64), %g0
 	 nop
 ENDPROC(__arch_hweight64)
 EXPORT_SYMBOL(__arch_hweight64)
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index be3136f142a9993e0c6c8cfa1d651b1685654a73..a8103a84b4ac4a2ec84c44c302862b3aed8b7e7f 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -113,7 +113,7 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
 	if (!printk_ratelimit())
 		return;
 
-	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
+	printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
 	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
 	       tsk->comm, task_pid_nr(tsk), address,
 	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 815c03d7a765524424b92866b1567ea2a43695d4..41363f46797bf9f74dd922fadbd2a3f190e8c9bb 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -154,7 +154,7 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
 	if (!printk_ratelimit())
 		return;
 
-	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
+	printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
 	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
 	       tsk->comm, task_pid_nr(tsk), address,
 	       (void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 5765e7e711f78248d2bff70f9c57ca48a4514355..ff5f9cb3039af1f91c8701915f08c051c21d0d81 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -1245,14 +1245,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		u8 *func = ((u8 *)__bpf_call_base) + imm;
 
 		ctx->saw_call = true;
+		if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
+			emit_reg_move(bpf2sparc[BPF_REG_1], L7, ctx);
 
 		emit_call((u32 *)func, ctx);
 		emit_nop(ctx);
 
 		emit_reg_move(O0, bpf2sparc[BPF_REG_0], ctx);
 
-		if (bpf_helper_changes_pkt_data(func) && ctx->saw_ld_abs_ind)
-			load_skb_regs(ctx, bpf2sparc[BPF_REG_6]);
+		if (ctx->saw_ld_abs_ind && bpf_helper_changes_pkt_data(func))
+			load_skb_regs(ctx, L7);
 		break;
 	}
 
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index b668e351fd6c2e4f7a4b75c8a67eada77449abc9..fca34b2177e28a055663055d01c4fb7d78420285 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -15,9 +15,10 @@ extern void uml_setup_stubs(struct mm_struct *mm);
 /*
  * Needed since we do not use the asm-generic/mm_hooks.h:
  */
-static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
 	uml_setup_stubs(mm);
+	return 0;
 }
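arch_dup_mmap() changes from void to int across all architectures because the x86/PTI version can now fail (it may have to allocate when duplicating the LDT mapping). A hedged sketch of how the generic fork path is expected to consume the new return value; this is simplified, not the actual kernel/fork.c code:

static int dup_mmap_tail(struct mm_struct *oldmm, struct mm_struct *mm)
{
	int err = arch_dup_mmap(oldmm, mm);

	if (err)
		return err;	/* fork() now fails cleanly instead of losing the error */

	return 0;
}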
 extern void arch_exit_mmap(struct mm_struct *mm);
 static inline void arch_unmap(struct mm_struct *mm,
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 4e6fcb32620ffb2125f648622499e5bf7c950e72..428644175956231aad112a0ce221452913736635 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -150,7 +150,7 @@ static void show_segv_info(struct uml_pt_regs *regs)
 	if (!printk_ratelimit())
 		return;
 
-	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %x",
+	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %x",
 		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
 		tsk->comm, task_pid_nr(tsk), FAULT_ADDRESS(*fi),
 		(void *)UPT_IP(regs), (void *)UPT_SP(regs),
diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h
index 59b06b48f27d7a4e0d8b82fc147d3fdad7f75295..5c205a9cb5a6a4bb2c865255bc946d7ca4882db1 100644
--- a/arch/unicore32/include/asm/mmu_context.h
+++ b/arch/unicore32/include/asm/mmu_context.h
@@ -81,9 +81,10 @@ do { \
 	} \
 } while (0)
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-				 struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+				struct mm_struct *mm)
 {
+	return 0;
 }
 
 static inline void arch_unmap(struct mm_struct *mm,
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8eed3f94bfc774de5e3f344590f8889a999dea9c..d4fc98c50378c40bc901f6446d2bfff68151eb6a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -926,7 +926,8 @@ config MAXSMP
 config NR_CPUS
 	int "Maximum number of CPUs" if SMP && !MAXSMP
 	range 2 8 if SMP && X86_32 && !X86_BIGSMP
-	range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK
+	range 2 64 if SMP && X86_32 && X86_BIGSMP
+	range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK && X86_64
 	range 2 8192 if SMP && !MAXSMP && CPUMASK_OFFSTACK && X86_64
 	default "1" if !SMP
 	default "8192" if MAXSMP
diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
index d5364ca2e3f9290d0ba36606b7e25369f872c144..b5e5e02f8cde7fa9123dc56981c3a3a98f45843c 100644
--- a/arch/x86/boot/compressed/pagetable.c
+++ b/arch/x86/boot/compressed/pagetable.c
@@ -23,6 +23,9 @@
  */
 #undef CONFIG_AMD_MEM_ENCRYPT
 
+/* No PAGE_TABLE_ISOLATION support needed either: */
+#undef CONFIG_PAGE_TABLE_ISOLATION
+
 #include "misc.h"
 
 /* These actually do the work of building the kernel identity maps. */
diff --git a/arch/x86/boot/genimage.sh b/arch/x86/boot/genimage.sh
index c9e8499fbfe75c0a98d0223247fa6cb0746198bb..6a10d52a41452d941c22e197fa50b4ebd93ba0d5 100644
--- a/arch/x86/boot/genimage.sh
+++ b/arch/x86/boot/genimage.sh
@@ -80,39 +80,43 @@ genfdimage288() {
 	mcopy $FBZIMAGE w:linux
 }
 
-genisoimage() {
+geniso() {
 	tmp_dir=`dirname $FIMAGE`/isoimage
 	rm -rf $tmp_dir
 	mkdir $tmp_dir
-	for i in lib lib64 share end ; do
+	for i in lib lib64 share ; do
 		for j in syslinux ISOLINUX ; do
 			if [ -f /usr/$i/$j/isolinux.bin ] ; then
 				isolinux=/usr/$i/$j/isolinux.bin
-				cp $isolinux $tmp_dir
 			fi
 		done
 		for j in syslinux syslinux/modules/bios ; do
 			if [ -f /usr/$i/$j/ldlinux.c32 ]; then
 				ldlinux=/usr/$i/$j/ldlinux.c32
-				cp $ldlinux $tmp_dir
 			fi
 		done
 		if [ -n "$isolinux" -a -n "$ldlinux" ] ; then
 			break
 		fi
-		if [ $i = end -a -z "$isolinux" ] ; then
-			echo 'Need an isolinux.bin file, please install syslinux/isolinux.'
-			exit 1
-		fi
 	done
+	if [ -z "$isolinux" ] ; then
+		echo 'Need an isolinux.bin file, please install syslinux/isolinux.'
+		exit 1
+	fi
+	if [ -z "$ldlinux" ] ; then
+		echo 'Need an ldlinux.c32 file, please install syslinux/isolinux.'
+		exit 1
+	fi
+	cp $isolinux $tmp_dir
+	cp $ldlinux $tmp_dir
 	cp $FBZIMAGE $tmp_dir/linux
 	echo "$KCMDLINE" > $tmp_dir/isolinux.cfg
 	if [ -f "$FDINITRD" ] ; then
 		cp "$FDINITRD" $tmp_dir/initrd.img
 	fi
-	mkisofs -J -r -input-charset=utf-8 -quiet -o $FIMAGE -b isolinux.bin \
-		-c boot.cat -no-emul-boot -boot-load-size 4 -boot-info-table \
-		$tmp_dir
+	genisoimage -J -r -input-charset=utf-8 -quiet -o $FIMAGE \
+		-b isolinux.bin -c boot.cat -no-emul-boot -boot-load-size 4 \
+		-boot-info-table $tmp_dir
 	isohybrid $FIMAGE 2>/dev/null || true
 	rm -rf $tmp_dir
 }
@@ -121,6 +125,6 @@ case $1 in
 	bzdisk)     genbzdisk;;
 	fdimage144) genfdimage144;;
 	fdimage288) genfdimage288;;
-	isoimage)   genisoimage;;
+	isoimage)   geniso;;
 	*)          echo 'Unknown image format'; exit 1;
 esac
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 3fd8bc560faece4cb6253e87a4c092965a73e7af..45a63e00a6af9a12b4739246d6844ba94f766e71 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -1,6 +1,11 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/jump_label.h>
 #include <asm/unwind_hints.h>
+#include <asm/cpufeatures.h>
+#include <asm/page_types.h>
+#include <asm/percpu.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor-flags.h>
 
 /*
 
@@ -187,6 +192,146 @@ For 32-bit we have the following conventions - kernel is built with
 #endif
 .endm
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+
+/*
+ * PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two
+ * halves:
+ */
+#define PTI_SWITCH_PGTABLES_MASK	(1<<PAGE_SHIFT)
+#define PTI_SWITCH_MASK		(PTI_SWITCH_PGTABLES_MASK|(1<<X86_CR3_PTI_SWITCH_BIT))
+
+.macro SET_NOFLUSH_BIT	reg:req
+	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
+.endm
+
+.macro ADJUST_KERNEL_CR3 reg:req
+	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
+	/* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
+	andq    $(~PTI_SWITCH_MASK), \reg
+.endm
+
+.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
+	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+	mov	%cr3, \scratch_reg
+	ADJUST_KERNEL_CR3 \scratch_reg
+	mov	\scratch_reg, %cr3
+.Lend_\@:
+.endm
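A user-space C restatement of the bit arithmetic these macros encode, with the constants written out as assumptions that mirror the definitions above (bit PAGE_SHIFT selects the user half of the 8k PGD, X86_CR3_PTI_SWITCH_BIT selects the user PCID, and bit 63 is the PCID no-flush hint):

#include <stdint.h>

#define PTI_USER_PGTABLE_BIT	12	/* assumed: PAGE_SHIFT */
#define PTI_USER_PCID_BIT	11	/* assumed: X86_CR3_PTI_SWITCH_BIT */
#define CR3_NOFLUSH_BIT		63	/* assumed: X86_CR3_PCID_NOFLUSH_BIT */

#define PTI_SWITCH_MASK_SKETCH	((1ULL << PTI_USER_PGTABLE_BIT) | \
				 (1ULL << PTI_USER_PCID_BIT))

static inline uint64_t adjust_kernel_cr3(uint64_t cr3)
{
	return cr3 & ~PTI_SWITCH_MASK_SKETCH;	/* ADJUST_KERNEL_CR3 */
}

static inline uint64_t switch_to_user_cr3(uint64_t cr3, int noflush)
{
	cr3 |= PTI_SWITCH_MASK_SKETCH;		/* user PGD half + user PCID */
	if (noflush)
		cr3 |= 1ULL << CR3_NOFLUSH_BIT;	/* SET_NOFLUSH_BIT */
	return cr3;
}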
+
+#define THIS_CPU_user_pcid_flush_mask   \
+	PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask
+
+.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
+	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+	mov	%cr3, \scratch_reg
+
+	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID
+
+	/*
+	 * Test if the ASID needs a flush.
+	 */
+	movq	\scratch_reg, \scratch_reg2
+	andq	$(0x7FF), \scratch_reg		/* mask ASID */
+	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
+	jnc	.Lnoflush_\@
+
+	/* Flush needed, clear the bit */
+	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
+	movq	\scratch_reg2, \scratch_reg
+	jmp	.Lwrcr3_\@
+
+.Lnoflush_\@:
+	movq	\scratch_reg2, \scratch_reg
+	SET_NOFLUSH_BIT \scratch_reg
+
+.Lwrcr3_\@:
+	/* Flip the PGD and ASID to the user version */
+	orq     $(PTI_SWITCH_MASK), \scratch_reg
+	mov	\scratch_reg, %cr3
+.Lend_\@:
+.endm
+
+.macro SWITCH_TO_USER_CR3_STACK	scratch_reg:req
+	pushq	%rax
+	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax
+	popq	%rax
+.endm
+
+.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
+	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
+	movq	%cr3, \scratch_reg
+	movq	\scratch_reg, \save_reg
+	/*
+	 * Is the "switch mask" all zero?  That means that both of
+	 * these are zero:
+	 *
+	 *	1. The user/kernel PCID bit, and
+	 *	2. The user/kernel "bit" that points CR3 to the
+	 *	   bottom half of the 8k PGD
+	 *
+	 * That indicates a kernel CR3 value, not a user CR3.
+	 */
+	testq	$(PTI_SWITCH_MASK), \scratch_reg
+	jz	.Ldone_\@
+
+	ADJUST_KERNEL_CR3 \scratch_reg
+	movq	\scratch_reg, %cr3
+
+.Ldone_\@:
+.endm
+
+.macro RESTORE_CR3 scratch_reg:req save_reg:req
+	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+
+	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID
+
+	/*
+	 * KERNEL pages can always resume with NOFLUSH as we do
+	 * explicit flushes.
+	 */
+	bt	$X86_CR3_PTI_SWITCH_BIT, \save_reg
+	jnc	.Lnoflush_\@
+
+	/*
+	 * Check if there's a pending flush for the user ASID we're
+	 * about to set.
+	 */
+	movq	\save_reg, \scratch_reg
+	andq	$(0x7FF), \scratch_reg
+	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
+	jnc	.Lnoflush_\@
+
+	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
+	jmp	.Lwrcr3_\@
+
+.Lnoflush_\@:
+	SET_NOFLUSH_BIT \save_reg
+
+.Lwrcr3_\@:
+	/*
+	 * The CR3 write could be avoided when not changing its value,
+	 * but would require a CR3 read *and* a scratch register.
+	 */
+	movq	\save_reg, %cr3
+.Lend_\@:
+.endm
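The flush-mask handling in SWITCH_TO_USER_CR3_NOSTACK and RESTORE_CR3 follows one protocol: a per-CPU bitmask records user ASIDs whose TLB entries went stale while the kernel CR3 was live; when returning to such an ASID the pending bit is cleared and the NOFLUSH hint is omitted so the CR3 write flushes. A hedged C sketch of that decision, where user_pcid_flush_mask models the per-CPU cpu_tlbstate field referenced above:

#include <stdint.h>

static uint64_t user_pcid_flush_mask;		/* per-CPU in the real kernel */

static uint64_t finish_user_cr3(uint64_t user_cr3)
{
	unsigned int asid = user_cr3 & 0x7ff;	/* same 0x7FF mask as the asm above */

	/* Real ASIDs are tiny; the bound only keeps this sketch well-defined. */
	if (asid < 64 && (user_pcid_flush_mask & (1ULL << asid))) {
		user_pcid_flush_mask &= ~(1ULL << asid);	/* btr: clear pending flush */
		return user_cr3;			/* flushing CR3 write */
	}
	return user_cr3 | (1ULL << 63);			/* X86_CR3_PCID_NOFLUSH */
}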
+
+#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */
+
+.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
+.endm
+.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
+.endm
+.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
+.endm
+.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
+.endm
+.macro RESTORE_CR3 scratch_reg:req save_reg:req
+.endm
+
+#endif
+
 #endif /* CONFIG_X86_64 */
 
 /*
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 4838037f97f6edffda62b5b045c837fcc29402f0..ace8f321a5a1f2d1331cc4331a1922c9ed3d8bc1 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -941,9 +941,10 @@ ENTRY(debug)
 	movl	%esp, %eax			# pt_regs pointer
 
 	/* Are we currently on the SYSENTER stack? */
-	PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
-	subl	%eax, %ecx	/* ecx = (end of SYSENTER_stack) - esp */
-	cmpl	$SIZEOF_SYSENTER_stack, %ecx
+	movl	PER_CPU_VAR(cpu_entry_area), %ecx
+	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
+	subl	%eax, %ecx	/* ecx = (end of entry_stack) - esp */
+	cmpl	$SIZEOF_entry_stack, %ecx
 	jb	.Ldebug_from_sysenter_stack
 
 	TRACE_IRQS_OFF
@@ -984,9 +985,10 @@ ENTRY(nmi)
 	movl	%esp, %eax			# pt_regs pointer
 
 	/* Are we currently on the SYSENTER stack? */
-	PER_CPU(cpu_tss + CPU_TSS_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx)
-	subl	%eax, %ecx	/* ecx = (end of SYSENTER_stack) - esp */
-	cmpl	$SIZEOF_SYSENTER_stack, %ecx
+	movl	PER_CPU_VAR(cpu_entry_area), %ecx
+	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
+	subl	%eax, %ecx	/* ecx = (end of entry_stack) - esp */
+	cmpl	$SIZEOF_entry_stack, %ecx
 	jb	.Lnmi_from_sysenter_stack
 
 	/* Not on SYSENTER stack. */
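Both hunks perform the same range check, just against the relocated entry stack: compute (stack end - current esp) and treat the entry as coming from the entry stack when that distance is below the stack size. In C, with unsigned wraparound doing the work:

#include <stdbool.h>
#include <stdint.h>

/* True when base < sp <= base + size, i.e. sp points into the stack. */
static bool on_entry_stack(uintptr_t sp, uintptr_t base, uintptr_t size)
{
	return (base + size - sp) < size;
}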
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index f81d50d7ceacdefa06d61482687937096c68421c..f048e384ff54e06530b657efc3b00cdf50f1ce5b 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -23,7 +23,6 @@
 #include <asm/segment.h>
 #include <asm/cache.h>
 #include <asm/errno.h>
-#include "calling.h"
 #include <asm/asm-offsets.h>
 #include <asm/msr.h>
 #include <asm/unistd.h>
@@ -40,6 +39,8 @@
 #include <asm/frame.h>
 #include <linux/err.h>
 
+#include "calling.h"
+
 .code64
 .section .entry.text, "ax"
 
@@ -140,6 +141,67 @@ END(native_usergs_sysret64)
  * with them due to bugs in both AMD and Intel CPUs.
  */
 
+	.pushsection .entry_trampoline, "ax"
+
+/*
+ * The code in here gets remapped into cpu_entry_area's trampoline.  This means
+ * that the assembler and linker have the wrong idea as to where this code
+ * lives (and, in fact, it's mapped more than once, so it's not even at a
+ * fixed address).  So we can't reference any symbols outside the entry
+ * trampoline and expect it to work.
+ *
+ * Instead, we carefully abuse %rip-relative addressing.
+ * _entry_trampoline(%rip) refers to the start of the (remapped) entry
+ * trampoline.  We can thus find cpu_entry_area with this macro:
+ */
+
+#define CPU_ENTRY_AREA \
+	_entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)
+
+/* The top word of the SYSENTER stack is hot and is usable as scratch space. */
+#define RSP_SCRATCH	CPU_ENTRY_AREA_entry_stack + \
+			SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA
+
+ENTRY(entry_SYSCALL_64_trampoline)
+	UNWIND_HINT_EMPTY
+	swapgs
+
+	/* Stash the user RSP. */
+	movq	%rsp, RSP_SCRATCH
+
+	/* Note: using %rsp as a scratch reg. */
+	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
+
+	/* Load the top of the task stack into RSP */
+	movq	CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp
+
+	/* Start building the simulated IRET frame. */
+	pushq	$__USER_DS			/* pt_regs->ss */
+	pushq	RSP_SCRATCH			/* pt_regs->sp */
+	pushq	%r11				/* pt_regs->flags */
+	pushq	$__USER_CS			/* pt_regs->cs */
+	pushq	%rcx				/* pt_regs->ip */
+
+	/*
+	 * x86 lacks a near absolute jump, and we can't jump to the real
+	 * entry text with a relative jump.  We could push the target
+	 * address and then use retq, but this destroys the pipeline on
+	 * many CPUs (wasting over 20 cycles on Sandy Bridge).  Instead,
+	 * spill RDI and restore it in a second-stage trampoline.
+	 */
+	pushq	%rdi
+	movq	$entry_SYSCALL_64_stage2, %rdi
+	jmp	*%rdi
+END(entry_SYSCALL_64_trampoline)
+
+	.popsection
+
+ENTRY(entry_SYSCALL_64_stage2)
+	UNWIND_HINT_EMPTY
+	popq	%rdi
+	jmp	entry_SYSCALL_64_after_hwframe
+END(entry_SYSCALL_64_stage2)
+
 ENTRY(entry_SYSCALL_64)
 	UNWIND_HINT_EMPTY
 	/*
@@ -149,6 +211,10 @@ ENTRY(entry_SYSCALL_64)
 	 */
 
 	swapgs
+	/*
+	 * This path is only taken when PAGE_TABLE_ISOLATION is disabled, so it
+	 * is not required to switch CR3.
+	 */
 	movq	%rsp, PER_CPU_VAR(rsp_scratch)
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
@@ -330,8 +396,25 @@ syscall_return_via_sysret:
 	popq	%rsi	/* skip rcx */
 	popq	%rdx
 	popq	%rsi
+
+	/*
+	 * Now all regs are restored except RSP and RDI.
+	 * Save old stack pointer and switch to trampoline stack.
+	 */
+	movq	%rsp, %rdi
+	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+
+	pushq	RSP-RDI(%rdi)	/* RSP */
+	pushq	(%rdi)		/* RDI */
+
+	/*
+	 * We are on the trampoline stack.  All regs except RDI are live.
+	 * We can do future final exit work right here.
+	 */
+	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
+
 	popq	%rdi
-	movq	RSP-ORIG_RAX(%rsp), %rsp
+	popq	%rsp
 	USERGS_SYSRET64
 END(entry_SYSCALL_64)
 
@@ -466,12 +549,13 @@ END(irq_entries_start)
 
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
-	pushfq
-	testl $X86_EFLAGS_IF, (%rsp)
+	pushq %rax
+	SAVE_FLAGS(CLBR_RAX)
+	testl $X86_EFLAGS_IF, %eax
 	jz .Lokay_\@
 	ud2
 .Lokay_\@:
-	addq $8, %rsp
+	popq %rax
 #endif
 .endm
 
@@ -563,6 +647,13 @@ END(irq_entries_start)
 /* 0(%rsp): ~(interrupt number) */
 	.macro interrupt func
 	cld
+
+	testb	$3, CS-ORIG_RAX(%rsp)
+	jz	1f
+	SWAPGS
+	call	switch_to_thread_stack
+1:
+
 	ALLOC_PT_GPREGS_ON_STACK
 	SAVE_C_REGS
 	SAVE_EXTRA_REGS
@@ -572,12 +663,8 @@ END(irq_entries_start)
 	jz	1f
 
 	/*
-	 * IRQ from user mode.  Switch to kernel gsbase and inform context
-	 * tracking that we're in kernel mode.
-	 */
-	SWAPGS
-
-	/*
+	 * IRQ from user mode.
+	 *
 	 * We need to tell lockdep that IRQs are off.  We can't do this until
 	 * we fix gsbase, and we should do it before enter_from_user_mode
 	 * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
@@ -630,10 +717,43 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
 	ud2
 1:
 #endif
-	SWAPGS
 	POP_EXTRA_REGS
-	POP_C_REGS
-	addq	$8, %rsp	/* skip regs->orig_ax */
+	popq	%r11
+	popq	%r10
+	popq	%r9
+	popq	%r8
+	popq	%rax
+	popq	%rcx
+	popq	%rdx
+	popq	%rsi
+
+	/*
+	 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
+	 * Save old stack pointer and switch to trampoline stack.
+	 */
+	movq	%rsp, %rdi
+	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
+
+	/* Copy the IRET frame to the trampoline stack. */
+	pushq	6*8(%rdi)	/* SS */
+	pushq	5*8(%rdi)	/* RSP */
+	pushq	4*8(%rdi)	/* EFLAGS */
+	pushq	3*8(%rdi)	/* CS */
+	pushq	2*8(%rdi)	/* RIP */
+
+	/* Push user RDI on the trampoline stack. */
+	pushq	(%rdi)
+
+	/*
+	 * We are on the trampoline stack.  All regs except RDI are live.
+	 * We can do future final exit work right here.
+	 */
+
+	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
+
+	/* Restore RDI. */
+	popq	%rdi
+	SWAPGS
 	INTERRUPT_RETURN
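The ordering in this exit path is the important part: everything that still needs the task stack (the register pops and the copy of the remaining six words) happens first, then the stack pointer moves to the per-CPU trampoline stack, and only then is CR3 switched to the user page tables, where the task stack may no longer be mapped. A schematic C view of the data movement, assuming a five-word hardware IRET frame plus the saved RDI:

#include <stdint.h>
#include <string.h>

struct return_frame {
	uint64_t rdi;				/* pushed last, popped first */
	uint64_t ip, cs, flags, sp, ss;		/* hardware IRET frame */
};

static void stage_return_frame(const struct return_frame *task_stack,
			       struct return_frame *trampoline_stack)
{
	/* Must complete before the CR3 switch hides the task stack. */
	memcpy(trampoline_stack, task_stack, sizeof(*trampoline_stack));
}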
 
 
@@ -713,7 +833,9 @@ native_irq_return_ldt:
 	 */
 
 	pushq	%rdi				/* Stash user RDI */
-	SWAPGS
+	SWAPGS					/* to kernel GS */
+	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */
+
 	movq	PER_CPU_VAR(espfix_waddr), %rdi
 	movq	%rax, (0*8)(%rdi)		/* user RAX */
 	movq	(1*8)(%rsp), %rax		/* user RIP */
@@ -729,7 +851,6 @@ native_irq_return_ldt:
 	/* Now RAX == RSP. */
 
 	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */
-	popq	%rdi				/* Restore user RDI */
 
 	/*
 	 * espfix_stack[31:16] == 0.  The page tables are set up such that
@@ -740,7 +861,11 @@ native_irq_return_ldt:
 	 * still points to an RO alias of the ESPFIX stack.
 	 */
 	orq	PER_CPU_VAR(espfix_stack), %rax
-	SWAPGS
+
+	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
+	SWAPGS					/* to user GS */
+	popq	%rdi				/* Restore user RDI */
+
 	movq	%rax, %rsp
 	UNWIND_HINT_IRET_REGS offset=8
 
@@ -829,7 +954,35 @@ apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
 /*
  * Exception entry points.
  */
-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
+#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
+
+/*
+ * Switch to the thread stack.  This is called with the IRET frame and
+ * orig_ax on the stack.  (That is, RDI..R12 are not on the stack and
+ * space has not been allocated for them.)
+ */
+ENTRY(switch_to_thread_stack)
+	UNWIND_HINT_FUNC
+
+	pushq	%rdi
+	/* Need to switch before accessing the thread stack. */
+	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+	movq	%rsp, %rdi
+	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+	UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI
+
+	pushq	7*8(%rdi)		/* regs->ss */
+	pushq	6*8(%rdi)		/* regs->rsp */
+	pushq	5*8(%rdi)		/* regs->eflags */
+	pushq	4*8(%rdi)		/* regs->cs */
+	pushq	3*8(%rdi)		/* regs->ip */
+	pushq	2*8(%rdi)		/* regs->orig_ax */
+	pushq	8(%rdi)			/* return address */
+	UNWIND_HINT_FUNC
+
+	movq	(%rdi), %rdi
+	ret
+END(switch_to_thread_stack)
 
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
 ENTRY(\sym)
@@ -848,11 +1001,12 @@ ENTRY(\sym)
 
 	ALLOC_PT_GPREGS_ON_STACK
 
-	.if \paranoid
-	.if \paranoid == 1
+	.if \paranoid < 2
 	testb	$3, CS(%rsp)			/* If coming from userspace, switch stacks */
-	jnz	1f
+	jnz	.Lfrom_usermode_switch_stack_\@
 	.endif
+
+	.if \paranoid
 	call	paranoid_entry
 	.else
 	call	error_entry
@@ -894,20 +1048,15 @@ ENTRY(\sym)
 	jmp	error_exit
 	.endif
 
-	.if \paranoid == 1
+	.if \paranoid < 2
 	/*
-	 * Paranoid entry from userspace.  Switch stacks and treat it
+	 * Entry from userspace.  Switch stacks and treat it
 	 * as a normal entry.  This means that paranoid handlers
 	 * run in real process context if user_mode(regs).
 	 */
-1:
+.Lfrom_usermode_switch_stack_\@:
 	call	error_entry
 
-
-	movq	%rsp, %rdi			/* pt_regs pointer */
-	call	sync_regs
-	movq	%rax, %rsp			/* switch stack */
-
 	movq	%rsp, %rdi			/* pt_regs pointer */
 
 	.if \has_error_code
@@ -1119,7 +1268,11 @@ ENTRY(paranoid_entry)
 	js	1f				/* negative -> in kernel */
 	SWAPGS
 	xorl	%ebx, %ebx
-1:	ret
+
+1:
+	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
+
+	ret
 END(paranoid_entry)
 
 /*
@@ -1141,6 +1294,7 @@ ENTRY(paranoid_exit)
 	testl	%ebx, %ebx			/* swapgs needed? */
 	jnz	.Lparanoid_exit_no_swapgs
 	TRACE_IRQS_IRETQ
+	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
 	SWAPGS_UNSAFE_STACK
 	jmp	.Lparanoid_exit_restore
 .Lparanoid_exit_no_swapgs:
@@ -1168,8 +1322,18 @@ ENTRY(error_entry)
 	 * from user mode due to an IRET fault.
 	 */
 	SWAPGS
+	/* We have user CR3.  Change to kernel CR3. */
+	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
 
 .Lerror_entry_from_usermode_after_swapgs:
+	/* Put us onto the real thread stack. */
+	popq	%r12				/* save return addr in %r12 */
+	movq	%rsp, %rdi			/* arg0 = pt_regs pointer */
+	call	sync_regs
+	movq	%rax, %rsp			/* switch stack */
+	ENCODE_FRAME_POINTER
+	pushq	%r12
+
 	/*
 	 * We need to tell lockdep that IRQs are off.  We can't do this until
 	 * we fix gsbase, and we should do it before enter_from_user_mode
@@ -1206,6 +1370,7 @@ ENTRY(error_entry)
 	 * .Lgs_change's error handler with kernel gsbase.
 	 */
 	SWAPGS
+	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
 	jmp .Lerror_entry_done
 
 .Lbstep_iret:
@@ -1215,10 +1380,11 @@ ENTRY(error_entry)
 
 .Lerror_bad_iret:
 	/*
-	 * We came from an IRET to user mode, so we have user gsbase.
-	 * Switch to kernel gsbase:
+	 * We came from an IRET to user mode, so we have user
+	 * gsbase and CR3.  Switch to kernel gsbase and CR3:
 	 */
 	SWAPGS
+	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
 
 	/*
 	 * Pretend that the exception came from user mode: set up pt_regs
@@ -1250,6 +1416,10 @@ END(error_exit)
 /*
  * Runs on exception stack.  Xen PV does not go through this path at all,
  * so we can use real assembly here.
+ *
+ * Registers:
+ *	%r14: Used to save/restore the CR3 of the interrupted context
+ *	      when PAGE_TABLE_ISOLATION is in use.  Do not clobber.
  */
 ENTRY(nmi)
 	UNWIND_HINT_IRET_REGS
@@ -1313,6 +1483,7 @@ ENTRY(nmi)
 
 	swapgs
 	cld
+	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
 	movq	%rsp, %rdx
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 	UNWIND_HINT_IRET_REGS base=%rdx offset=8
@@ -1565,6 +1736,8 @@ end_repeat_nmi:
 	movq	$-1, %rsi
 	call	do_nmi
 
+	RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
+
 	testl	%ebx, %ebx			/* swapgs needed? */
 	jnz	nmi_restore
 nmi_swapgs:
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 568e130d932cd2a7d44393e5fc52408cffe64f34..40f17009ec20cd5e4eff6147aa0468232d47e096 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -48,7 +48,11 @@
  */
 ENTRY(entry_SYSENTER_compat)
 	/* Interrupts are off on entry. */
-	SWAPGS_UNSAFE_STACK
+	SWAPGS
+
+	/* We are about to clobber %rsp anyway, clobbering here is OK */
+	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
+
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
 	/*
@@ -215,6 +219,12 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
 	pushq   $0			/* pt_regs->r14 = 0 */
 	pushq   $0			/* pt_regs->r15 = 0 */
 
+	/*
+	 * We just saved %rdi so it is safe to clobber.  It is not
+	 * preserved during the C calls inside TRACE_IRQS_OFF anyway.
+	 */
+	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+
 	/*
 	 * User mode is traced as though IRQs are on, and SYSENTER
 	 * turned them off.
@@ -256,10 +266,22 @@ sysret32_from_system_call:
 	 * when the system call started, which is already known to user
 	 * code.  We zero R8-R10 to avoid info leaks.
          */
+	movq	RSP-ORIG_RAX(%rsp), %rsp
+
+	/*
+	 * The original userspace %rsp (RSP-ORIG_RAX(%rsp)) is stored
+	 * on the process stack which is not mapped to userspace and
+	 * not readable after we SWITCH_TO_USER_CR3.  Delay the CR3
+	 * switch until after the last reference to the process
+	 * stack.
+	 *
+	 * %r8/%r9 are zeroed before the sysret, thus safe to clobber.
+	 */
+	SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9
+
 	xorq	%r8, %r8
 	xorq	%r9, %r9
 	xorq	%r10, %r10
-	movq	RSP-ORIG_RAX(%rsp), %rsp
 	swapgs
 	sysretl
 END(entry_SYSCALL_compat)
@@ -306,8 +328,11 @@ ENTRY(entry_INT80_compat)
 	 */
 	movl	%eax, %eax
 
-	/* Construct struct pt_regs on stack (iret frame is already on stack) */
 	pushq	%rax			/* pt_regs->orig_ax */
+
+	/* switch to thread stack expects orig_ax to be pushed */
+	call	switch_to_thread_stack
+
 	pushq	%rdi			/* pt_regs->di */
 	pushq	%rsi			/* pt_regs->si */
 	pushq	%rdx			/* pt_regs->dx */
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index f279ba2643dc8933b9659242082e7ef2ea2d9dd6..577fa8adb785baf5ea1c993a2bbc88adf43fbbcc 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -37,6 +37,7 @@
 #include <asm/unistd.h>
 #include <asm/fixmap.h>
 #include <asm/traps.h>
+#include <asm/paravirt.h>
 
 #define CREATE_TRACE_POINTS
 #include "vsyscall_trace.h"
@@ -138,6 +139,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
 	WARN_ON_ONCE(address != regs->ip);
 
+	/* This should be unreachable in NATIVE mode. */
+	if (WARN_ON(vsyscall_mode == NATIVE))
+		return false;
+
 	if (vsyscall_mode == NONE) {
 		warn_bad_vsyscall(KERN_INFO, regs,
 				  "vsyscall attempted with vsyscall=none");
@@ -329,16 +334,47 @@ int in_gate_area_no_mm(unsigned long addr)
 	return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR;
 }
 
+/*
+ * The VSYSCALL page is the only user-accessible page in the kernel address
+ * range.  Normally, the kernel page tables can have _PAGE_USER clear, but
+ * the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls
+ * are enabled.
+ *
+ * Some day we may create a "minimal" vsyscall mode in which we emulate
+ * vsyscalls but leave the page not present.  If so, we skip calling
+ * this.
+ */
+void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
+{
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
+	set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
+	p4d = p4d_offset(pgd, VSYSCALL_ADDR);
+#if CONFIG_PGTABLE_LEVELS >= 5
+	p4d->p4d |= _PAGE_USER;
+#endif
+	pud = pud_offset(p4d, VSYSCALL_ADDR);
+	set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
+	pmd = pmd_offset(pud, VSYSCALL_ADDR);
+	set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER));
+}
+
 void __init map_vsyscall(void)
 {
 	extern char __vsyscall_page;
 	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
 
-	if (vsyscall_mode != NONE)
+	if (vsyscall_mode != NONE) {
 		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
 			     vsyscall_mode == NATIVE
 			     ? PAGE_KERNEL_VSYSCALL
 			     : PAGE_KERNEL_VVAR);
+		set_vsyscall_pgtable_user_bits(swapper_pg_dir);
+	}
 
 	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
 		     (unsigned long)VSYSCALL_ADDR);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 09c26a4f139c125e000675689ebc983acd8ab91a..731153a4681e73f761dea8c0c15ce6757b89860e 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3847,6 +3847,8 @@ static struct attribute *intel_pmu_attrs[] = {
 
 __init int intel_pmu_init(void)
 {
+	struct attribute **extra_attr = NULL;
+	struct attribute **to_free = NULL;
 	union cpuid10_edx edx;
 	union cpuid10_eax eax;
 	union cpuid10_ebx ebx;
@@ -3854,7 +3856,6 @@ __init int intel_pmu_init(void)
 	unsigned int unused;
 	struct extra_reg *er;
 	int version, i;
-	struct attribute **extra_attr = NULL;
 	char *name;
 
 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
@@ -4294,6 +4295,7 @@ __init int intel_pmu_init(void)
 		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
 			hsw_format_attr : nhm_format_attr;
 		extra_attr = merge_attr(extra_attr, skl_format_attr);
+		to_free = extra_attr;
 		x86_pmu.cpu_events = get_hsw_events_attrs();
 		intel_pmu_pebs_data_source_skl(
 			boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
@@ -4401,6 +4403,7 @@ __init int intel_pmu_init(void)
 		pr_cont("full-width counters, ");
 	}
 
+	kfree(to_free);
 	return 0;
 }
 
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 3674a4b6f8bd0c5f12223b8f5c16067a933450df..8f0aace08b87975489a2401859e51ef20d82838a 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -3,16 +3,18 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 
+#include <asm/cpu_entry_area.h>
 #include <asm/perf_event.h>
 #include <asm/insn.h>
 
 #include "../perf_event.h"
 
+/* Waste a full page so it can be mapped into the cpu_entry_area */
+DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
+
 /* The size of a BTS record in bytes: */
 #define BTS_RECORD_SIZE		24
 
-#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
-#define PEBS_BUFFER_SIZE	(PAGE_SIZE << 4)
 #define PEBS_FIXUP_SIZE		PAGE_SIZE
 
 /*
@@ -279,17 +281,52 @@ void fini_debug_store_on_cpu(int cpu)
 
 static DEFINE_PER_CPU(void *, insn_buffer);
 
-static int alloc_pebs_buffer(int cpu)
+static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
 {
-	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+	phys_addr_t pa;
+	size_t msz = 0;
+
+	pa = virt_to_phys(addr);
+	for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
+		cea_set_pte(cea, pa, prot);
+}
+
+static void ds_clear_cea(void *cea, size_t size)
+{
+	size_t msz = 0;
+
+	for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
+		cea_set_pte(cea, 0, PAGE_NONE);
+}
+
+static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
+{
+	unsigned int order = get_order(size);
 	int node = cpu_to_node(cpu);
-	int max;
-	void *buffer, *ibuffer;
+	struct page *page;
+
+	page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
+	return page ? page_address(page) : NULL;
+}
+
+static void dsfree_pages(const void *buffer, size_t size)
+{
+	if (buffer)
+		free_pages((unsigned long)buffer, get_order(size));
+}
+
+static int alloc_pebs_buffer(int cpu)
+{
+	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
+	struct debug_store *ds = hwev->ds;
+	size_t bsiz = x86_pmu.pebs_buffer_size;
+	int max, node = cpu_to_node(cpu);
+	void *buffer, *ibuffer, *cea;
 
 	if (!x86_pmu.pebs)
 		return 0;
 
-	buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node);
+	buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu);
 	if (unlikely(!buffer))
 		return -ENOMEM;
 
@@ -300,25 +337,27 @@ static int alloc_pebs_buffer(int cpu)
 	if (x86_pmu.intel_cap.pebs_format < 2) {
 		ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
 		if (!ibuffer) {
-			kfree(buffer);
+			dsfree_pages(buffer, bsiz);
 			return -ENOMEM;
 		}
 		per_cpu(insn_buffer, cpu) = ibuffer;
 	}
-
-	max = x86_pmu.pebs_buffer_size / x86_pmu.pebs_record_size;
-
-	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
+	hwev->ds_pebs_vaddr = buffer;
+	/* Update the cpu entry area mapping */
+	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
+	ds->pebs_buffer_base = (unsigned long) cea;
+	ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL);
 	ds->pebs_index = ds->pebs_buffer_base;
-	ds->pebs_absolute_maximum = ds->pebs_buffer_base +
-		max * x86_pmu.pebs_record_size;
-
+	max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size);
+	ds->pebs_absolute_maximum = ds->pebs_buffer_base + max;
 	return 0;
 }
 
 static void release_pebs_buffer(int cpu)
 {
-	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
+	struct debug_store *ds = hwev->ds;
+	void *cea;
 
 	if (!ds || !x86_pmu.pebs)
 		return;
@@ -326,73 +365,70 @@ static void release_pebs_buffer(int cpu)
 	kfree(per_cpu(insn_buffer, cpu));
 	per_cpu(insn_buffer, cpu) = NULL;
 
-	kfree((void *)(unsigned long)ds->pebs_buffer_base);
+	/* Clear the fixmap */
+	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
+	ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
 	ds->pebs_buffer_base = 0;
+	dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
+	hwev->ds_pebs_vaddr = NULL;
 }
 
 static int alloc_bts_buffer(int cpu)
 {
-	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-	int node = cpu_to_node(cpu);
-	int max, thresh;
-	void *buffer;
+	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
+	struct debug_store *ds = hwev->ds;
+	void *buffer, *cea;
+	int max;
 
 	if (!x86_pmu.bts)
 		return 0;
 
-	buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
+	buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu);
 	if (unlikely(!buffer)) {
 		WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
 		return -ENOMEM;
 	}
-
-	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
-	thresh = max / 16;
-
-	ds->bts_buffer_base = (u64)(unsigned long)buffer;
+	hwev->ds_bts_vaddr = buffer;
+	/* Update the fixmap */
+	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
+	ds->bts_buffer_base = (unsigned long) cea;
+	ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
 	ds->bts_index = ds->bts_buffer_base;
-	ds->bts_absolute_maximum = ds->bts_buffer_base +
-		max * BTS_RECORD_SIZE;
-	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
-		thresh * BTS_RECORD_SIZE;
-
+	max = BTS_RECORD_SIZE * (BTS_BUFFER_SIZE / BTS_RECORD_SIZE);
+	ds->bts_absolute_maximum = ds->bts_buffer_base + max;
+	ds->bts_interrupt_threshold = ds->bts_absolute_maximum - (max / 16);
 	return 0;
 }
 
 static void release_bts_buffer(int cpu)
 {
-	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
+	struct debug_store *ds = hwev->ds;
+	void *cea;
 
 	if (!ds || !x86_pmu.bts)
 		return;
 
-	kfree((void *)(unsigned long)ds->bts_buffer_base);
+	/* Clear the fixmap */
+	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
+	ds_clear_cea(cea, BTS_BUFFER_SIZE);
 	ds->bts_buffer_base = 0;
+	dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
+	hwev->ds_bts_vaddr = NULL;
 }
 
 static int alloc_ds_buffer(int cpu)
 {
-	int node = cpu_to_node(cpu);
-	struct debug_store *ds;
-
-	ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node);
-	if (unlikely(!ds))
-		return -ENOMEM;
+	struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store;
 
+	memset(ds, 0, sizeof(*ds));
 	per_cpu(cpu_hw_events, cpu).ds = ds;
-
 	return 0;
 }
 
 static void release_ds_buffer(int cpu)
 {
-	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
-
-	if (!ds)
-		return;
-
 	per_cpu(cpu_hw_events, cpu).ds = NULL;
-	kfree(ds);
 }
 
 void release_ds_buffers(void)
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index f7aaadf9331fb75587e74a42b041d78b2a014fc0..8e4ea143ed96403d275bf6727801961db9a053d7 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -14,6 +14,8 @@
 
 #include <linux/perf_event.h>
 
+#include <asm/intel_ds.h>
+
 /* To enable MSR tracing please use the generic trace points. */
 
 /*
@@ -77,8 +79,6 @@ struct amd_nb {
 	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
 };
 
-/* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS		8
 #define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)
 
 /*
@@ -95,23 +95,6 @@ struct amd_nb {
 	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
 	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)
 
-/*
- * A debug store configuration.
- *
- * We only support architectures that use 64bit fields.
- */
-struct debug_store {
-	u64	bts_buffer_base;
-	u64	bts_index;
-	u64	bts_absolute_maximum;
-	u64	bts_interrupt_threshold;
-	u64	pebs_buffer_base;
-	u64	pebs_index;
-	u64	pebs_absolute_maximum;
-	u64	pebs_interrupt_threshold;
-	u64	pebs_event_reset[MAX_PEBS_EVENTS];
-};
-
 #define PEBS_REGS \
 	(PERF_REG_X86_AX | \
 	 PERF_REG_X86_BX | \
@@ -216,6 +199,8 @@ struct cpu_hw_events {
 	 * Intel DebugStore bits
 	 */
 	struct debug_store	*ds;
+	void			*ds_pebs_vaddr;
+	void			*ds_bts_vaddr;
 	u64			pebs_enabled;
 	int			n_pebs;
 	int			n_large_pebs;
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 219faaec51dfa192f69d8893c8844219c0c89029..386a6900e206f6578e3b38ee7f085d36ac50a928 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -136,6 +136,7 @@
 #endif
 
 #ifndef __ASSEMBLY__
+#ifndef __BPF__
 /*
  * This output constraint should be used for any inline asm which has a "call"
  * instruction.  Otherwise the asm may be inserted before the frame pointer
@@ -145,5 +146,6 @@
 register unsigned long current_stack_pointer asm(_ASM_SP);
 #define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
 #endif
+#endif
 
 #endif /* _ASM_X86_ASM_H */
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
new file mode 100644
index 0000000000000000000000000000000000000000..4a7884b8dca55bc58f077a90ad93d6398bddd053
--- /dev/null
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef _ASM_X86_CPU_ENTRY_AREA_H
+#define _ASM_X86_CPU_ENTRY_AREA_H
+
+#include <linux/percpu-defs.h>
+#include <asm/processor.h>
+#include <asm/intel_ds.h>
+
+/*
+ * cpu_entry_area is a percpu region that contains things needed by the CPU
+ * and early entry/exit code.  Real types aren't used for all fields here
+ * to avoid circular header dependencies.
+ *
+ * Every field is a virtual alias of some other allocated backing store.
+ * There is no direct allocation of a struct cpu_entry_area.
+ */
+struct cpu_entry_area {
+	char gdt[PAGE_SIZE];
+
+	/*
+	 * The GDT is just below entry_stack and thus serves (on x86_64) as
+	 * a a read-only guard page.
+	 * a read-only guard page.
+	struct entry_stack_page entry_stack_page;
+
+	/*
+	 * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
+	 * we need task switches to work, and task switches write to the TSS.
+	 */
+	struct tss_struct tss;
+
+	char entry_trampoline[PAGE_SIZE];
+
+#ifdef CONFIG_X86_64
+	/*
+	 * Exception stacks used for IST entries.
+	 *
+	 * In the future, this should have a separate slot for each stack
+	 * with guard pages between them.
+	 */
+	char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
+#endif
+#ifdef CONFIG_CPU_SUP_INTEL
+	/*
+	 * Per CPU debug store for Intel performance monitoring. Wastes a
+	 * full page at the moment.
+	 */
+	struct debug_store cpu_debug_store;
+	/*
+	 * The actual PEBS/BTS buffers must be mapped to user space.
+	 * Reserve enough fixmap PTEs.
+	 */
+	struct debug_store_buffers cpu_debug_buffers;
+#endif
+};
+
+#define CPU_ENTRY_AREA_SIZE	(sizeof(struct cpu_entry_area))
+#define CPU_ENTRY_AREA_TOT_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)
+
+DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
+
+extern void setup_cpu_entry_areas(void);
+extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
+
+#define	CPU_ENTRY_AREA_RO_IDT		CPU_ENTRY_AREA_BASE
+#define CPU_ENTRY_AREA_PER_CPU		(CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
+
+#define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)
+
+#define CPU_ENTRY_AREA_MAP_SIZE			\
+	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)
+
+extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
+
+static inline struct entry_stack *cpu_entry_stack(int cpu)
+{
+	return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
+}
+
+#endif
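Given the layout macros above, the per-CPU area for CPU n starts CPU_ENTRY_AREA_SIZE * n bytes past CPU_ENTRY_AREA_PER_CPU. The real get_cpu_entry_area() is implemented elsewhere in this series; this sketch only restates the arithmetic the macros imply:

static inline struct cpu_entry_area *cpu_entry_area_sketch(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU +
			   (unsigned long)cpu * CPU_ENTRY_AREA_SIZE;

	return (struct cpu_entry_area *)va;
}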
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index bf6a76202a779ee131b4df8c89449ab52abd0a79..ea9a7dde62e5c4d551ba89e429f911fb5c6603fd 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -135,6 +135,8 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
 	set_bit(bit, (unsigned long *)cpu_caps_set);	\
 } while (0)
 
+#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
+
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS)
 /*
  * Static testing of CPU features.  Used the same as boot_cpu_has().
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 800104c8a3edfee7f4f52a33b8451a51ee0ed90a..07cdd17157050e4123bdffc43347ecd58811bc1d 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -197,11 +197,12 @@
 #define X86_FEATURE_CAT_L3		( 7*32+ 4) /* Cache Allocation Technology L3 */
 #define X86_FEATURE_CAT_L2		( 7*32+ 5) /* Cache Allocation Technology L2 */
 #define X86_FEATURE_CDP_L3		( 7*32+ 6) /* Code and Data Prioritization L3 */
+#define X86_FEATURE_INVPCID_SINGLE	( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
 
 #define X86_FEATURE_HW_PSTATE		( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK	( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_SME			( 7*32+10) /* AMD Secure Memory Encryption */
-
+#define X86_FEATURE_PTI			( 7*32+11) /* Kernel Page Table Isolation enabled */
 #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_INTEL_PT		( 7*32+15) /* Intel Processor Trace */
 #define X86_FEATURE_AVX512_4VNNIW	( 7*32+16) /* AVX-512 Neural Network Instructions */
@@ -340,5 +341,6 @@
 #define X86_BUG_SWAPGS_FENCE		X86_BUG(11) /* SWAPGS without input dep on GS */
 #define X86_BUG_MONITOR			X86_BUG(12) /* IPI required to wake up remote CPU */
 #define X86_BUG_AMD_E400		X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+#define X86_BUG_CPU_INSECURE		X86_BUG(14) /* CPU is insecure and needs kernel page table isolation */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 4011cb03ef08e52db15f52779ce366c26359a34b..13c5ee878a477902b8494532b24853b6c156a170 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -7,6 +7,7 @@
 #include <asm/mmu.h>
 #include <asm/fixmap.h>
 #include <asm/irq_vectors.h>
+#include <asm/cpu_entry_area.h>
 
 #include <linux/smp.h>
 #include <linux/percpu.h>
@@ -20,6 +21,8 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
 
 	desc->type		= (info->read_exec_only ^ 1) << 1;
 	desc->type	       |= info->contents << 2;
+	/* Set the ACCESS bit so it can be mapped RO */
+	desc->type	       |= 1;
 
 	desc->s			= 1;
 	desc->dpl		= 0x3;
@@ -60,17 +63,10 @@ static inline struct desc_struct *get_current_gdt_rw(void)
 	return this_cpu_ptr(&gdt_page)->gdt;
 }
 
-/* Get the fixmap index for a specific processor */
-static inline unsigned int get_cpu_gdt_ro_index(int cpu)
-{
-	return FIX_GDT_REMAP_BEGIN + cpu;
-}
-
 /* Provide the fixmap address of the remapped GDT */
 static inline struct desc_struct *get_cpu_gdt_ro(int cpu)
 {
-	unsigned int idx = get_cpu_gdt_ro_index(cpu);
-	return (struct desc_struct *)__fix_to_virt(idx);
+	return (struct desc_struct *)&get_cpu_entry_area(cpu)->gdt;
 }
 
 /* Provide the current read-only GDT */
@@ -185,7 +181,7 @@ static inline void set_tssldt_descriptor(void *d, unsigned long addr,
 #endif
 }
 
-static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
+static inline void __set_tss_desc(unsigned cpu, unsigned int entry, struct x86_hw_tss *addr)
 {
 	struct desc_struct *d = get_cpu_gdt_rw(cpu);
 	tss_desc tss;
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 14d6d50073142b0f49b06850ccd0d394546479ee..b027633e73003e121d7c043438ac7dbd10fc07a4 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -50,6 +50,12 @@
 # define DISABLE_LA57	(1<<(X86_FEATURE_LA57 & 31))
 #endif
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+# define DISABLE_PTI		0
+#else
+# define DISABLE_PTI		(1 << (X86_FEATURE_PTI & 31))
+#endif
+
 /*
  * Make sure to add features to the correct mask
  */
@@ -60,7 +66,7 @@
 #define DISABLED_MASK4	(DISABLE_PCID)
 #define DISABLED_MASK5	0
 #define DISABLED_MASK6	0
-#define DISABLED_MASK7	0
+#define DISABLED_MASK7	(DISABLE_PTI)
 #define DISABLED_MASK8	0
 #define DISABLED_MASK9	(DISABLE_MPX)
 #define DISABLED_MASK10	0
diff --git a/arch/x86/include/asm/espfix.h b/arch/x86/include/asm/espfix.h
index 0211029076ea8b9ed6648b9bf298c99c8b2124ad..6777480d8a427eaaa07559f77985c125aa66bb6c 100644
--- a/arch/x86/include/asm/espfix.h
+++ b/arch/x86/include/asm/espfix.h
@@ -2,7 +2,7 @@
 #ifndef _ASM_X86_ESPFIX_H
 #define _ASM_X86_ESPFIX_H
 
-#ifdef CONFIG_X86_64
+#ifdef CONFIG_X86_ESPFIX64
 
 #include <asm/percpu.h>
 
@@ -11,7 +11,8 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
 
 extern void init_espfix_bsp(void);
 extern void init_espfix_ap(int cpu);
-
-#endif /* CONFIG_X86_64 */
+#else
+static inline void init_espfix_ap(int cpu) { }
+#endif
 
 #endif /* _ASM_X86_ESPFIX_H */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index b0c505fe9a958c701fef6d96f281bb8ab1a773de..64c4a30e0d39621ff8587fc8da538cd3d1d9f144 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -44,7 +44,6 @@ extern unsigned long __FIXADDR_TOP;
 			 PAGE_SIZE)
 #endif
 
-
 /*
  * Here we define all the compile-time 'special' virtual
  * addresses. The point is to have a constant address at
@@ -84,7 +83,6 @@ enum fixed_addresses {
 	FIX_IO_APIC_BASE_0,
 	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
 #endif
-	FIX_RO_IDT,	/* Virtual mapping for read-only IDT */
 #ifdef CONFIG_X86_32
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
@@ -100,9 +98,6 @@ enum fixed_addresses {
 #ifdef	CONFIG_X86_INTEL_MID
 	FIX_LNW_VRTC,
 #endif
-	/* Fixmap entries to remap the GDTs, one per processor. */
-	FIX_GDT_REMAP_BEGIN,
-	FIX_GDT_REMAP_END = FIX_GDT_REMAP_BEGIN + NR_CPUS - 1,
 
 #ifdef CONFIG_ACPI_APEI_GHES
 	/* Used for GHES mapping from assorted contexts */
@@ -143,7 +138,7 @@ enum fixed_addresses {
 extern void reserve_top_address(unsigned long reserve);
 
 #define FIXADDR_SIZE	(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START		(FIXADDR_TOP - FIXADDR_SIZE)
+#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
 
 extern int fixmaps_set;
 
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 1b0a5abcd8aeb6e700013c5434aaeb0bba7a152f..96aa6b9884dc5b3bc8d54c9ef1c6258eea13a0d0 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -20,16 +20,7 @@
 #ifndef _ASM_X86_HYPERVISOR_H
 #define _ASM_X86_HYPERVISOR_H
 
-#ifdef CONFIG_HYPERVISOR_GUEST
-
-#include <asm/kvm_para.h>
-#include <asm/x86_init.h>
-#include <asm/xen/hypervisor.h>
-
-/*
- * x86 hypervisor information
- */
-
+/* x86 hypervisor types  */
 enum x86_hypervisor_type {
 	X86_HYPER_NATIVE = 0,
 	X86_HYPER_VMWARE,
@@ -39,6 +30,12 @@ enum x86_hypervisor_type {
 	X86_HYPER_KVM,
 };
 
+#ifdef CONFIG_HYPERVISOR_GUEST
+
+#include <asm/kvm_para.h>
+#include <asm/x86_init.h>
+#include <asm/xen/hypervisor.h>
+
 struct hypervisor_x86 {
 	/* Hypervisor name */
 	const char	*name;
@@ -58,7 +55,15 @@ struct hypervisor_x86 {
 
 extern enum x86_hypervisor_type x86_hyper_type;
 extern void init_hypervisor_platform(void);
+static inline bool hypervisor_is_type(enum x86_hypervisor_type type)
+{
+	return x86_hyper_type == type;
+}
 #else
 static inline void init_hypervisor_platform(void) { }
+static inline bool hypervisor_is_type(enum x86_hypervisor_type type)
+{
+	return type == X86_HYPER_NATIVE;
+}
 #endif /* CONFIG_HYPERVISOR_GUEST */
 #endif /* _ASM_X86_HYPERVISOR_H */
diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h
new file mode 100644
index 0000000000000000000000000000000000000000..62a9f4966b4298ec2f3aa6c32d0881bd4296956c
--- /dev/null
+++ b/arch/x86/include/asm/intel_ds.h
@@ -0,0 +1,36 @@
+#ifndef _ASM_INTEL_DS_H
+#define _ASM_INTEL_DS_H
+
+#include <linux/percpu-defs.h>
+
+#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
+#define PEBS_BUFFER_SIZE	(PAGE_SIZE << 4)
+
+/* The maximal number of PEBS events: */
+#define MAX_PEBS_EVENTS		8
+
+/*
+ * A debug store configuration.
+ *
+ * We only support architectures that use 64bit fields.
+ */
+struct debug_store {
+	u64	bts_buffer_base;
+	u64	bts_index;
+	u64	bts_absolute_maximum;
+	u64	bts_interrupt_threshold;
+	u64	pebs_buffer_base;
+	u64	pebs_index;
+	u64	pebs_absolute_maximum;
+	u64	pebs_interrupt_threshold;
+	u64	pebs_event_reset[MAX_PEBS_EVENTS];
+} __aligned(PAGE_SIZE);
+
+DECLARE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
+
+struct debug_store_buffers {
+	char	bts_buffer[BTS_BUFFER_SIZE];
+	char	pebs_buffer[PEBS_BUFFER_SIZE];
+};
+
+#endif
diff --git a/arch/x86/include/asm/invpcid.h b/arch/x86/include/asm/invpcid.h
new file mode 100644
index 0000000000000000000000000000000000000000..989cfa86de85184359e0205de9237f9e048e5e20
--- /dev/null
+++ b/arch/x86/include/asm/invpcid.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_INVPCID
+#define _ASM_X86_INVPCID
+
+static inline void __invpcid(unsigned long pcid, unsigned long addr,
+			     unsigned long type)
+{
+	struct { u64 d[2]; } desc = { { pcid, addr } };
+
+	/*
+	 * The memory clobber is because the whole point is to invalidate
+	 * stale TLB entries and, especially if we're flushing global
+	 * mappings, we don't want the compiler to reorder any subsequent
+	 * memory accesses before the TLB flush.
+	 *
+	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
+	 * invpcid (%rcx), %rax in long mode.
+	 */
+	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
+		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
+}
+
+#define INVPCID_TYPE_INDIV_ADDR		0
+#define INVPCID_TYPE_SINGLE_CTXT	1
+#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
+#define INVPCID_TYPE_ALL_NON_GLOBAL	3
+
+/* Flush all mappings for a given pcid and addr, not including globals. */
+static inline void invpcid_flush_one(unsigned long pcid,
+				     unsigned long addr)
+{
+	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
+}
+
+/* Flush all mappings for a given PCID, not including globals. */
+static inline void invpcid_flush_single_context(unsigned long pcid)
+{
+	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
+}
+
+/* Flush all mappings, including globals, for all PCIDs. */
+static inline void invpcid_flush_all(void)
+{
+	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
+}
+
+/* Flush all mappings for all PCIDs except globals. */
+static inline void invpcid_flush_all_nonglobals(void)
+{
+	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
+}
+
+#endif /* _ASM_X86_INVPCID */
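A typical caller pattern, sketched: prefer the INVPCID instruction when the CPU advertises it, and otherwise fall back to the older global-flush mechanism (toggling CR4.PGE), which lives outside this header. The plain bool parameter below is illustrative; real callers use the cpufeature test helpers instead:

static inline void flush_everything_sketch(bool cpu_has_invpcid)
{
	if (cpu_has_invpcid)
		invpcid_flush_all();		/* globals + all PCIDs in one go */
	/* else: toggle CR4.PGE (not shown), which also flushes global pages */
}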
diff --git a/arch/x86/include/asm/irqdomain.h b/arch/x86/include/asm/irqdomain.h
index 139feef467f7e298c6f9db57c43facc64f5468b6..c066ffae222b769996b78c13ee5d063dc7c5c544 100644
--- a/arch/x86/include/asm/irqdomain.h
+++ b/arch/x86/include/asm/irqdomain.h
@@ -44,7 +44,7 @@ extern int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
 extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
 			      unsigned int nr_irqs);
 extern int mp_irqdomain_activate(struct irq_domain *domain,
-				 struct irq_data *irq_data, bool early);
+				 struct irq_data *irq_data, bool reserve);
 extern void mp_irqdomain_deactivate(struct irq_domain *domain,
 				    struct irq_data *irq_data);
 extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain);
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index c8ef23f2c28f17c59308b9c41179c47f85e075ad..89f08955fff733c688a5ce4f4a0b8d74050ee617 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -142,6 +142,9 @@ static inline notrace unsigned long arch_local_irq_save(void)
 	swapgs;					\
 	sysretl
 
+#ifdef CONFIG_DEBUG_ENTRY
+#define SAVE_FLAGS(x)		pushfq; popq %rax
+#endif
 #else
 #define INTERRUPT_RETURN		iret
 #define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index f86a8caa561e8873c3f34e6e8b8cd509ebadd819..395c9631e000a3a17aa574c1b25fcc2cafd5b5fb 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -26,6 +26,7 @@ extern void die(const char *, struct pt_regs *,long);
 extern int __must_check __die(const char *, struct pt_regs *, long);
 extern void show_stack_regs(struct pt_regs *regs);
 extern void __show_regs(struct pt_regs *regs, int all);
+extern void show_iret_regs(struct pt_regs *regs);
 extern unsigned long oops_begin(void);
 extern void oops_end(unsigned long, struct pt_regs *, int signr);
 
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 9ea26f16749706fddd5b15e8bf557a9e6156e165..5ff3e8af2c2056b7fe19560ee2ba1ad7146aaf2a 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -3,6 +3,7 @@
 #define _ASM_X86_MMU_H
 
 #include <linux/spinlock.h>
+#include <linux/rwsem.h>
 #include <linux/mutex.h>
 #include <linux/atomic.h>
 
@@ -27,7 +28,8 @@ typedef struct {
 	atomic64_t tlb_gen;
 
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
-	struct ldt_struct *ldt;
+	struct rw_semaphore	ldt_usr_sem;
+	struct ldt_struct	*ldt;
 #endif
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 6d16d15d09a0daed96a1e3d670b6203d1779b98e..c931b88982a0ff59e3b67947cc606e452f327dc0 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -50,22 +50,53 @@ struct ldt_struct {
 	 * call gates.  On native, we could merge the ldt_struct and LDT
 	 * allocations, but it's not worth trying to optimize.
 	 */
-	struct desc_struct *entries;
-	unsigned int nr_entries;
+	struct desc_struct	*entries;
+	unsigned int		nr_entries;
+
+	/*
+	 * If PTI is in use, then the entries array is not mapped while we're
+	 * in user mode.  The whole array will be aliased at the addressed
+	 * in user mode.  The whole array will be aliased at the address
+	 * and map, and enable a new LDT without invalidating the mapping
+	 * of an older, still-in-use LDT.
+	 *
+	 * slot will be -1 if this LDT doesn't have an alias mapping.
+	 */
+	int			slot;
 };
 
+/* This is a multiple of PAGE_SIZE. */
+#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)
+
+static inline void *ldt_slot_va(int slot)
+{
+#ifdef CONFIG_X86_64
+	return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
+#else
+	BUG();
+#endif
+}
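The two-slot scheme described in the comment above works because a new LDT is always mapped into the slot the current one is not using, so the alias the CPU may still be loading from stays valid until the switch completes. A sketch of the slot choice; treat it as illustrative of the LDT write path rather than the exact call site:

static inline int pick_ldt_slot(const struct ldt_struct *old_ldt)
{
	/* First LDT for this mm uses slot 0; afterwards alternate slots. */
	return old_ldt ? !old_ldt->slot : 0;
}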
+
 /*
  * Used for LDT copy/destruction.
  */
-int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
+static inline void init_new_context_ldt(struct mm_struct *mm)
+{
+	mm->context.ldt = NULL;
+	init_rwsem(&mm->context.ldt_usr_sem);
+}
+int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
 void destroy_context_ldt(struct mm_struct *mm);
+void ldt_arch_exit_mmap(struct mm_struct *mm);
 #else	/* CONFIG_MODIFY_LDT_SYSCALL */
-static inline int init_new_context_ldt(struct task_struct *tsk,
-				       struct mm_struct *mm)
+static inline void init_new_context_ldt(struct mm_struct *mm) { }
+static inline int ldt_dup_context(struct mm_struct *oldmm,
+				  struct mm_struct *mm)
 {
 	return 0;
 }
-static inline void destroy_context_ldt(struct mm_struct *mm) {}
+static inline void destroy_context_ldt(struct mm_struct *mm) { }
+static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
 #endif
 
 static inline void load_mm_ldt(struct mm_struct *mm)
@@ -90,10 +121,31 @@ static inline void load_mm_ldt(struct mm_struct *mm)
 	 * that we can see.
 	 */
 
-	if (unlikely(ldt))
-		set_ldt(ldt->entries, ldt->nr_entries);
-	else
+	if (unlikely(ldt)) {
+		if (static_cpu_has(X86_FEATURE_PTI)) {
+			if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
+				/*
+				 * Whoops -- either the new LDT isn't mapped
+				 * (if slot == -1) or is mapped into a bogus
+				 * slot (if slot > 1).
+				 */
+				clear_LDT();
+				return;
+			}
+
+			/*
+			 * If page table isolation is enabled, ldt->entries
+			 * will not be mapped in the userspace pagetables.
+			 * Tell the CPU to access the LDT through the alias
+			 * at ldt_slot_va(ldt->slot).
+			 */
+			set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
+		} else {
+			set_ldt(ldt->entries, ldt->nr_entries);
+		}
+	} else {
 		clear_LDT();
+	}
 #else
 	clear_LDT();
 #endif
@@ -132,18 +184,21 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
+	mutex_init(&mm->context.lock);
+
 	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
 	atomic64_set(&mm->context.tlb_gen, 0);
 
-	#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
 		/* pkey 0 is the default and always allocated */
 		mm->context.pkey_allocation_map = 0x1;
 		/* -1 means unallocated or invalid */
 		mm->context.execute_only_pkey = -1;
 	}
-	#endif
-	return init_new_context_ldt(tsk, mm);
+#endif
+	init_new_context_ldt(mm);
+	return 0;
 }
 static inline void destroy_context(struct mm_struct *mm)
 {
@@ -176,15 +231,16 @@ do {						\
 } while (0)
 #endif
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-				 struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
 	paravirt_arch_dup_mmap(oldmm, mm);
+	return ldt_dup_context(oldmm, mm);
 }
 
 static inline void arch_exit_mmap(struct mm_struct *mm)
 {
 	paravirt_arch_exit_mmap(mm);
+	ldt_arch_exit_mmap(mm);
 }
 
 #ifdef CONFIG_X86_64
@@ -281,33 +337,6 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 	return __pkru_allows_pkey(vma_pkey(vma), write);
 }
 
-/*
- * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID
- * bits.  This serves two purposes.  It prevents a nasty situation in
- * which PCID-unaware code saves CR3, loads some other value (with PCID
- * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if
- * the saved ASID was nonzero.  It also means that any bugs involving
- * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger
- * deterministically.
- */
-
-static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid)
-{
-	if (static_cpu_has(X86_FEATURE_PCID)) {
-		VM_WARN_ON_ONCE(asid > 4094);
-		return __sme_pa(mm->pgd) | (asid + 1);
-	} else {
-		VM_WARN_ON_ONCE(asid != 0);
-		return __sme_pa(mm->pgd);
-	}
-}
-
-static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
-{
-	VM_WARN_ON_ONCE(asid > 4094);
-	return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH;
-}
-
 /*
  * This can be used from process context to figure out what the value of
  * CR3 is without needing to do a (slow) __read_cr3().
@@ -317,7 +346,7 @@ static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid)
  */
 static inline unsigned long __get_current_cr3_fast(void)
 {
-	unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm),
+	unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
 		this_cpu_read(cpu_tlbstate.loaded_mm_asid));
 
 	/* For now, be very restrictive about when this can be called. */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 283efcaac8aff86f2c004bc23e4b8642cbf3d527..892df375b6155a51f584760efb9f9e77c3f732e8 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -927,6 +927,15 @@ extern void default_banner(void);
 	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
 		  CLBR_NONE,						\
 		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
+
+#ifdef CONFIG_DEBUG_ENTRY
+#define SAVE_FLAGS(clobbers)                                        \
+	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \
+		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);        \
+		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl);    \
+		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+#endif
+
 #endif	/* CONFIG_X86_32 */
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index 4b5e1eafada731cdedd48d771d79d55e766a84eb..aff42e1da6ee1591bbec15ce6f0543953789d19c 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -30,6 +30,17 @@ static inline void paravirt_release_p4d(unsigned long pfn) {}
  */
 extern gfp_t __userpte_alloc_gfp;
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+/*
+ * Instead of one PGD, we acquire two PGDs.  Being order-1, it is
+ * both 8k in size and 8k-aligned.  That lets us just flip bit 12
+ * in a pointer to swap between the two 4k halves.
+ */
+#define PGD_ALLOCATION_ORDER 1
+#else
+#define PGD_ALLOCATION_ORDER 0
+#endif
+
 /*
  * Allocate and free page tables.
  */
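With PGD_ALLOCATION_ORDER in place, allocating a pgd conceptually becomes a single order-1 request whose two 4k halves serve the kernel and user page tables. A hedged sketch of such an allocation (the helper name and GFP flags are illustrative, not the actual pgd_alloc() change):

	static pgd_t *alloc_pgd_pair(void)
	{
		/* One 8k-aligned block: kernel half first, user half in the
		 * second 4k, reachable by flipping bit 12. */
		return (pgd_t *)__get_free_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO,
						 PGD_ALLOCATION_ORDER);
	}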
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 95e2dfd755218ccfaf6417b44c822b545a35568e..e42b8943cb1a311a00ddceb36129ede3012489ef 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -28,6 +28,7 @@ extern pgd_t early_top_pgt[PTRS_PER_PGD];
 int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
 
 void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
+void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
 void ptdump_walk_pgd_level_checkwx(void);
 
 #ifdef CONFIG_DEBUG_WX
@@ -841,7 +842,12 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
 
 static inline int p4d_bad(p4d_t p4d)
 {
-	return (p4d_flags(p4d) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
+	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;
+
+	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
+		ignore_flags |= _PAGE_NX;
+
+	return (p4d_flags(p4d) & ~ignore_flags) != 0;
 }
 #endif  /* CONFIG_PGTABLE_LEVELS > 3 */
 
@@ -875,7 +881,12 @@ static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
 
 static inline int pgd_bad(pgd_t pgd)
 {
-	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
+	unsigned long ignore_flags = _PAGE_USER;
+
+	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
+		ignore_flags |= _PAGE_NX;
+
+	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
 }
 
 static inline int pgd_none(pgd_t pgd)
@@ -904,7 +915,11 @@ static inline int pgd_none(pgd_t pgd)
  * pgd_offset() returns a (pgd_t *)
  * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
  */
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
+#define pgd_offset_pgd(pgd, address) (pgd + pgd_index((address)))
+/*
+ * a shortcut to get a pgd_t in a given mm
+ */
+#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
 /*
  * a shortcut which implies the use of the kernel's pgd, instead
  * of a process's
@@ -1106,7 +1121,14 @@ static inline int pud_write(pud_t pud)
  */
 static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
 {
-       memcpy(dst, src, count * sizeof(pgd_t));
+	memcpy(dst, src, count * sizeof(pgd_t));
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return;
+	/* Clone the user space pgd as well */
+	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
+	       count * sizeof(pgd_t));
+#endif
 }
 
 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
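Because clone_pgd_range() now mirrors the copied entries into the user-space pgd as well, existing callers keep both copies of the kernel mappings in sync without further changes. A hedged usage sketch, modeled on how a freshly allocated pgd is seeded from init_mm (new_pgd is a hypothetical local):

	/* Copy the kernel portion of swapper_pg_dir into the new pgd; with
	 * PTI enabled the same entries land in its user-space half too. */
	clone_pgd_range(new_pgd + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);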
diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
index f2ca9b28fd68303f4494775564aa9da77ddcd53a..ce245b0cdfcaa42bd932a387bbb189ee7349bfef 100644
--- a/arch/x86/include/asm/pgtable_32_types.h
+++ b/arch/x86/include/asm/pgtable_32_types.h
@@ -38,13 +38,22 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
 #define LAST_PKMAP 1024
 #endif
 
-#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1))	\
-		    & PMD_MASK)
+/*
+ * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c
+ * to avoid include recursion hell
+ */
+#define CPU_ENTRY_AREA_PAGES	(NR_CPUS * 40)
+
+#define CPU_ENTRY_AREA_BASE				\
+	((FIXADDR_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) & PMD_MASK)
+
+#define PKMAP_BASE		\
+	((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
 
 #ifdef CONFIG_HIGHMEM
 # define VMALLOC_END	(PKMAP_BASE - 2 * PAGE_SIZE)
 #else
-# define VMALLOC_END	(FIXADDR_START - 2 * PAGE_SIZE)
+# define VMALLOC_END	(CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE)
 #endif
 
 #define MODULES_VADDR	VMALLOC_START
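The comment above defers validation of CPU_ENTRY_AREA_PAGES to a BUILD_BUG_ON() in pgtable_32.c. A sketch of the kind of check intended, where CPU_ENTRY_AREA_MAP_SIZE is an assumed name for whatever total size the cpu_entry_area setup code ends up requiring:

	/* e.g. inside an early pagetable-init function in pgtable_32.c: */
	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);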
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index e9f05331e732a057b341e78977d666b8cfe35289..81462e9a34f6af49645a08f55c7d67e0144dbb77 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -131,9 +131,97 @@ static inline pud_t native_pudp_get_and_clear(pud_t *xp)
 #endif
 }
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+/*
+ * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
+ * (8k-aligned and 8k in size).  The kernel one occupies the first 4k and
+ * the user one the last 4k.  To switch between them, you
+ * just need to flip the 12th bit in their addresses.
+ */
+#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT
+
+/*
+ * This generates better code than the inline assembly in
+ * __set_bit().
+ */
+static inline void *ptr_set_bit(void *ptr, int bit)
+{
+	unsigned long __ptr = (unsigned long)ptr;
+
+	__ptr |= BIT(bit);
+	return (void *)__ptr;
+}
+static inline void *ptr_clear_bit(void *ptr, int bit)
+{
+	unsigned long __ptr = (unsigned long)ptr;
+
+	__ptr &= ~BIT(bit);
+	return (void *)__ptr;
+}
+
+static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
+{
+	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
+}
+
+static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
+{
+	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
+}
+
+static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
+{
+	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
+}
+
+static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
+{
+	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
+}
+#endif /* CONFIG_PAGE_TABLE_ISOLATION */
+
+/*
+ * Page table pages are page-aligned.  The lower half of the top
+ * level is used for userspace and the top half for the kernel.
+ *
+ * Returns true for parts of the PGD that map userspace and
+ * false for the parts that map the kernel.
+ */
+static inline bool pgdp_maps_userspace(void *__ptr)
+{
+	unsigned long ptr = (unsigned long)__ptr;
+
+	return (ptr & ~PAGE_MASK) < (PAGE_SIZE / 2);
+}
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd);
+
+/*
+ * Take a PGD location (pgdp) and a pgd value that needs to be set there.
+ * Populates the user copy and returns the resulting PGD that must be set in
+ * the kernel copy of the page tables.
+ */
+static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return pgd;
+	return __pti_set_user_pgd(pgdp, pgd);
+}
+#else
+static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+	return pgd;
+}
+#endif
+
 static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
 {
+#if defined(CONFIG_PAGE_TABLE_ISOLATION) && !defined(CONFIG_X86_5LEVEL)
+	p4dp->pgd = pti_set_user_pgd(&p4dp->pgd, p4d.pgd);
+#else
 	*p4dp = p4d;
+#endif
 }
 
 static inline void native_p4d_clear(p4d_t *p4d)
@@ -147,7 +235,11 @@ static inline void native_p4d_clear(p4d_t *p4d)
 
 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	*pgdp = pti_set_user_pgd(pgdp, pgd);
+#else
 	*pgdp = pgd;
+#endif
 }
 
 static inline void native_pgd_clear(pgd_t *pgd)
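Since PTI_PGTABLE_SWITCH_BIT equals PAGE_SHIFT and the order-1 pgd is 8k-aligned, the two halves of a pgd differ only in bit 12 of their addresses. A small illustration that follows directly from the definitions above (mm is a hypothetical mm_struct pointer):

	pgd_t *kpgd = mm->pgd;				/* kernel half: bit 12 clear */
	pgd_t *upgd = kernel_to_user_pgdp(kpgd);	/* user half:   bit 12 set   */

	/* (unsigned long)upgd == (unsigned long)kpgd + PAGE_SIZE, and
	 * user_to_kernel_pgdp(upgd) returns kpgd again. */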
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 6d5f45dcd4a13caafbf184f323d0725c2d5f53e4..b97a539bcdeee8ca47e39e7353b3e9d1757da24b 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -76,32 +76,45 @@ typedef struct { pteval_t pte; } pte_t;
 #define PGDIR_MASK	(~(PGDIR_SIZE - 1))
 
 /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
-#define MAXMEM		_AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
+#define MAXMEM			_AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
+
 #ifdef CONFIG_X86_5LEVEL
-#define VMALLOC_SIZE_TB _AC(16384, UL)
-#define __VMALLOC_BASE	_AC(0xff92000000000000, UL)
-#define __VMEMMAP_BASE	_AC(0xffd4000000000000, UL)
+# define VMALLOC_SIZE_TB	_AC(12800, UL)
+# define __VMALLOC_BASE		_AC(0xffa0000000000000, UL)
+# define __VMEMMAP_BASE		_AC(0xffd4000000000000, UL)
+# define LDT_PGD_ENTRY		_AC(-112, UL)
+# define LDT_BASE_ADDR		(LDT_PGD_ENTRY << PGDIR_SHIFT)
 #else
-#define VMALLOC_SIZE_TB	_AC(32, UL)
-#define __VMALLOC_BASE	_AC(0xffffc90000000000, UL)
-#define __VMEMMAP_BASE	_AC(0xffffea0000000000, UL)
+# define VMALLOC_SIZE_TB	_AC(32, UL)
+# define __VMALLOC_BASE		_AC(0xffffc90000000000, UL)
+# define __VMEMMAP_BASE		_AC(0xffffea0000000000, UL)
+# define LDT_PGD_ENTRY		_AC(-4, UL)
+# define LDT_BASE_ADDR		(LDT_PGD_ENTRY << PGDIR_SHIFT)
 #endif
+
 #ifdef CONFIG_RANDOMIZE_MEMORY
-#define VMALLOC_START	vmalloc_base
-#define VMEMMAP_START	vmemmap_base
+# define VMALLOC_START		vmalloc_base
+# define VMEMMAP_START		vmemmap_base
 #else
-#define VMALLOC_START	__VMALLOC_BASE
-#define VMEMMAP_START	__VMEMMAP_BASE
+# define VMALLOC_START		__VMALLOC_BASE
+# define VMEMMAP_START		__VMEMMAP_BASE
 #endif /* CONFIG_RANDOMIZE_MEMORY */
-#define VMALLOC_END	(VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
-#define MODULES_VADDR    (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
+
+#define VMALLOC_END		(VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
+
+#define MODULES_VADDR		(__START_KERNEL_map + KERNEL_IMAGE_SIZE)
 /* The module sections ends with the start of the fixmap */
-#define MODULES_END   __fix_to_virt(__end_of_fixed_addresses + 1)
-#define MODULES_LEN   (MODULES_END - MODULES_VADDR)
-#define ESPFIX_PGD_ENTRY _AC(-2, UL)
-#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT)
-#define EFI_VA_START	 ( -4 * (_AC(1, UL) << 30))
-#define EFI_VA_END	 (-68 * (_AC(1, UL) << 30))
+#define MODULES_END		__fix_to_virt(__end_of_fixed_addresses + 1)
+#define MODULES_LEN		(MODULES_END - MODULES_VADDR)
+
+#define ESPFIX_PGD_ENTRY	_AC(-2, UL)
+#define ESPFIX_BASE_ADDR	(ESPFIX_PGD_ENTRY << P4D_SHIFT)
+
+#define CPU_ENTRY_AREA_PGD	_AC(-3, UL)
+#define CPU_ENTRY_AREA_BASE	(CPU_ENTRY_AREA_PGD << P4D_SHIFT)
+
+#define EFI_VA_START		( -4 * (_AC(1, UL) << 30))
+#define EFI_VA_END		(-68 * (_AC(1, UL) << 30))
 
 #define EARLY_DYNAMIC_PAGE_TABLES	64
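For the non-5-level layout these negative top-level indices resolve to fixed virtual addresses; a worked example with PGDIR_SHIFT == P4D_SHIFT == 39 under 4-level paging (the values follow directly from the definitions above):

	/* LDT_BASE_ADDR       = -4UL << 39 = 0xfffffe0000000000 */
	/* CPU_ENTRY_AREA_BASE = -3UL << 39 = 0xfffffe8000000000 */
	/* ESPFIX_BASE_ADDR    = -2UL << 39 = 0xffffff0000000000 */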
 
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index 43212a43ee69feea1de27275d9075c566cdfcd2c..6a60fea90b9d9dd669033e4d7eee627b99a0c3d5 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -38,6 +38,11 @@
 #define CR3_ADDR_MASK	__sme_clr(0x7FFFFFFFFFFFF000ull)
 #define CR3_PCID_MASK	0xFFFull
 #define CR3_NOFLUSH	BIT_ULL(63)
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+# define X86_CR3_PTI_SWITCH_BIT	11
+#endif
+
 #else
 /*
  * CR3_ADDR_MASK needs at least bits 31:5 set on PAE systems, and we save
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index cc16fa882e3e760a40351cf3e7476ac9f25ffe00..d3a67fba200ae2a5c03f52a7815dca00b43c63ad 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -163,9 +163,9 @@ enum cpuid_regs_idx {
 extern struct cpuinfo_x86	boot_cpu_data;
 extern struct cpuinfo_x86	new_cpu_data;
 
-extern struct tss_struct	doublefault_tss;
-extern __u32			cpu_caps_cleared[NCAPINTS];
-extern __u32			cpu_caps_set[NCAPINTS];
+extern struct x86_hw_tss	doublefault_tss;
+extern __u32			cpu_caps_cleared[NCAPINTS + NBUGINTS];
+extern __u32			cpu_caps_set[NCAPINTS + NBUGINTS];
 
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
@@ -253,6 +253,11 @@ static inline void load_cr3(pgd_t *pgdir)
 	write_cr3(__sme_pa(pgdir));
 }
 
+/*
+ * Note that while the legacy 'TSS' name comes from 'Task State Segment',
+ * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
+ * unrelated to the task-switch mechanism:
+ */
 #ifdef CONFIG_X86_32
 /* This is the TSS defined by the hardware. */
 struct x86_hw_tss {
@@ -305,7 +310,13 @@ struct x86_hw_tss {
 struct x86_hw_tss {
 	u32			reserved1;
 	u64			sp0;
+
+	/*
+	 * We store cpu_current_top_of_stack in sp1 so it's always accessible.
+	 * Linux does not use ring 1, so sp1 is not otherwise needed.
+	 */
 	u64			sp1;
+
 	u64			sp2;
 	u64			reserved2;
 	u64			ist[7];
@@ -323,12 +334,22 @@ struct x86_hw_tss {
 #define IO_BITMAP_BITS			65536
 #define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
 #define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
-#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
+#define IO_BITMAP_OFFSET		(offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss))
 #define INVALID_IO_BITMAP_OFFSET	0x8000
 
+struct entry_stack {
+	unsigned long		words[64];
+};
+
+struct entry_stack_page {
+	struct entry_stack stack;
+} __aligned(PAGE_SIZE);
+
 struct tss_struct {
 	/*
-	 * The hardware state:
+	 * The fixed hardware portion.  This must not cross a page boundary
+	 * at risk of violating the SDM's advice and potentially triggering
+	 * errata.
 	 */
 	struct x86_hw_tss	x86_tss;
 
@@ -339,18 +360,9 @@ struct tss_struct {
 	 * be within the limit.
 	 */
 	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];
+} __aligned(PAGE_SIZE);
 
-#ifdef CONFIG_X86_32
-	/*
-	 * Space for the temporary SYSENTER stack.
-	 */
-	unsigned long		SYSENTER_stack_canary;
-	unsigned long		SYSENTER_stack[64];
-#endif
-
-} ____cacheline_aligned;
-
-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
+DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
 
 /*
  * sizeof(unsigned long) coming from an extra "long" at the end
@@ -364,6 +376,9 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
 
 #ifdef CONFIG_X86_32
 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
+#else
+/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
+#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
 #endif
 
 /*
@@ -523,7 +538,7 @@ static inline void native_set_iopl_mask(unsigned mask)
 static inline void
 native_load_sp0(unsigned long sp0)
 {
-	this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
+	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
 static inline void native_swapgs(void)
@@ -535,12 +550,12 @@ static inline void native_swapgs(void)
 
 static inline unsigned long current_top_of_stack(void)
 {
-#ifdef CONFIG_X86_64
-	return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
-#else
-	/* sp0 on x86_32 is special in and around vm86 mode. */
+	/*
+	 *  We can't read directly from tss.sp0: sp0 on x86_32 is special in
+	 *  and around vm86 mode and sp0 on x86_64 is special because of the
+	 *  entry trampoline.
+	 */
 	return this_cpu_read_stable(cpu_current_top_of_stack);
-#endif
 }
 
 static inline bool on_thread_stack(void)
@@ -837,13 +852,22 @@ static inline void spin_lock_prefetch(const void *x)
 
 #else
 /*
- * User space process size. 47bits minus one guard page.  The guard
- * page is necessary on Intel CPUs: if a SYSCALL instruction is at
- * the highest possible canonical userspace address, then that
- * syscall will enter the kernel with a non-canonical return
- * address, and SYSRET will explode dangerously.  We avoid this
- * particular problem by preventing anything from being mapped
- * at the maximum canonical address.
+ * User space process size.  This is the first address outside the user range.
+ * There are a few constraints that determine this:
+ *
+ * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
+ * address, then that syscall will enter the kernel with a
+ * non-canonical return address, and SYSRET will explode dangerously.
+ * We avoid this particular problem by preventing anything executable
+ * from being mapped at the maximum canonical address.
+ *
+ * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
+ * CPUs malfunction if they execute code from the highest canonical page.
+ * They'll speculate right off the end of the canonical space, and
+ * bad things happen.  This is worked around in the same way as the
+ * Intel problem.
+ *
+ * With page table isolation enabled, we map the LDT in ... [stay tuned]
  */
 #define TASK_SIZE_MAX	((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE)
 
diff --git a/arch/x86/include/asm/pti.h b/arch/x86/include/asm/pti.h
new file mode 100644
index 0000000000000000000000000000000000000000..0b5ef05b2d2d9f03a967afe10256a266ef242c98
--- /dev/null
+++ b/arch/x86/include/asm/pti.h
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _ASM_X86_PTI_H
+#define _ASM_X86_PTI_H
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+extern void pti_init(void);
+extern void pti_check_boottime_disable(void);
+#else
+static inline void pti_check_boottime_disable(void) { }
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_PTI_H */
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 8da111b3c342bbb61a9e630e101c8a83422a15ea..f737068787729f045a578776845231b0a0ee3e0d 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -16,6 +16,7 @@ enum stack_type {
 	STACK_TYPE_TASK,
 	STACK_TYPE_IRQ,
 	STACK_TYPE_SOFTIRQ,
+	STACK_TYPE_ENTRY,
 	STACK_TYPE_EXCEPTION,
 	STACK_TYPE_EXCEPTION_LAST = STACK_TYPE_EXCEPTION + N_EXCEPTION_STACKS-1,
 };
@@ -28,6 +29,8 @@ struct stack_info {
 bool in_task_stack(unsigned long *stack, struct task_struct *task,
 		   struct stack_info *info);
 
+bool in_entry_stack(unsigned long *stack, struct stack_info *info);
+
 int get_stack_info(unsigned long *stack, struct task_struct *task,
 		   struct stack_info *info, unsigned long *visit_mask);
 
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 8c6bd6863db9d6b737cd0649324c154f9b9798a3..eb5f7999a8934c7af8b6c5b170b05e5772264783 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -16,8 +16,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		      struct tss_struct *tss);
 
 /* This runs on the previous thread's stack. */
-static inline void prepare_switch_to(struct task_struct *prev,
-				     struct task_struct *next)
+static inline void prepare_switch_to(struct task_struct *next)
 {
 #ifdef CONFIG_VMAP_STACK
 	/*
@@ -70,7 +69,7 @@ struct fork_frame {
 
 #define switch_to(prev, next, last)					\
 do {									\
-	prepare_switch_to(prev, next);					\
+	prepare_switch_to(next);					\
 									\
 	((last) = __switch_to_asm((prev), (next)));			\
 } while (0)
@@ -79,10 +78,10 @@ do {									\
 static inline void refresh_sysenter_cs(struct thread_struct *thread)
 {
 	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
-	if (unlikely(this_cpu_read(cpu_tss.x86_tss.ss1) == thread->sysenter_cs))
+	if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
 		return;
 
-	this_cpu_write(cpu_tss.x86_tss.ss1, thread->sysenter_cs);
+	this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
 	wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
 }
 #endif
@@ -90,10 +89,12 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread)
 /* This is used when switching tasks or entering/exiting vm86 mode. */
 static inline void update_sp0(struct task_struct *task)
 {
+	/* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */
 #ifdef CONFIG_X86_32
 	load_sp0(task->thread.sp0);
 #else
-	load_sp0(task_top_of_stack(task));
+	if (static_cpu_has(X86_FEATURE_XENPV))
+		load_sp0(task_top_of_stack(task));
 #endif
 }
 
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 70f425947dc50f3e99ca639c0ead0d7e1cce636d..00223333821a96616647a9cbb6fe729c4a18b7b6 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -207,7 +207,7 @@ static inline int arch_within_stack_frames(const void * const stack,
 #else /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_X86_64
-# define cpu_current_top_of_stack (cpu_tss + TSS_sp0)
+# define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1)
 #endif
 
 #endif
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 877b5c1a1b1247116e20e7272dbade77e1874fc4..4a08dd2ab32ade77dcbfff6a8e8b77f5d5e43764 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -9,70 +9,130 @@
 #include <asm/cpufeature.h>
 #include <asm/special_insns.h>
 #include <asm/smp.h>
+#include <asm/invpcid.h>
+#include <asm/pti.h>
+#include <asm/processor-flags.h>
 
-static inline void __invpcid(unsigned long pcid, unsigned long addr,
-			     unsigned long type)
-{
-	struct { u64 d[2]; } desc = { { pcid, addr } };
+/*
+ * The x86 feature is called PCID (Process Context IDentifier). It is similar
+ * to what is traditionally called ASID on the RISC processors.
+ *
+ * We don't use the traditional ASID implementation, where each process/mm gets
+ * its own ASID and flush/restart when we run out of ASID space.
+ *
+ * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
+ * that came by on this CPU, allowing cheaper switch_mm between processes on
+ * this CPU.
+ *
+ * We end up with different spaces for different things. To avoid confusion we
+ * use different names for each of them:
+ *
+ * ASID  - [0, TLB_NR_DYN_ASIDS-1]
+ *         the canonical identifier for an mm
+ *
+ * kPCID - [1, TLB_NR_DYN_ASIDS]
+ *         the value we write into the PCID part of CR3; corresponds to the
+ *         ASID+1, because PCID 0 is special.
+ *
+ * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
+ *         for KPTI each mm has two address spaces and thus needs two
+ *         PCID values, but we can still do with a single ASID denomination
+ *         for each mm. Corresponds to kPCID + 2048.
+ *
+ */
 
-	/*
-	 * The memory clobber is because the whole point is to invalidate
-	 * stale TLB entries and, especially if we're flushing global
-	 * mappings, we don't want the compiler to reorder any subsequent
-	 * memory accesses before the TLB flush.
-	 *
-	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
-	 * invpcid (%rcx), %rax in long mode.
-	 */
-	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
-		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
-}
+/* There are 12 bits of space for ASIDS in CR3 */
+#define CR3_HW_ASID_BITS		12
 
-#define INVPCID_TYPE_INDIV_ADDR		0
-#define INVPCID_TYPE_SINGLE_CTXT	1
-#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
-#define INVPCID_TYPE_ALL_NON_GLOBAL	3
+/*
+ * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
+ * user/kernel switches
+ */
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+# define PTI_CONSUMED_PCID_BITS	1
+#else
+# define PTI_CONSUMED_PCID_BITS	0
+#endif
 
-/* Flush all mappings for a given pcid and addr, not including globals. */
-static inline void invpcid_flush_one(unsigned long pcid,
-				     unsigned long addr)
-{
-	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
-}
+#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)
+
+/*
+ * ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid.  -1 below to account
+ * for them being zero-based.  Another -1 is because PCID 0 is reserved for
+ * use by non-PCID-aware users.
+ */
+#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)
+
+/*
+ * 6 because 6 should be plenty and struct tlb_state will fit in two cache
+ * lines.
+ */
+#define TLB_NR_DYN_ASIDS	6
 
-/* Flush all mappings for a given PCID, not including globals. */
-static inline void invpcid_flush_single_context(unsigned long pcid)
+/*
+ * Given @asid, compute kPCID
+ */
+static inline u16 kern_pcid(u16 asid)
 {
-	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
+	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	/*
+	 * Make sure that the dynamic ASID space does not conflict with the
+	 * bit we are using to switch between user and kernel ASIDs.
+	 */
+	BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_SWITCH_BIT));
+
+	/*
+	 * The ASID being passed in here should have respected the
+	 * MAX_ASID_AVAILABLE and thus never have the switch bit set.
+	 */
+	VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_SWITCH_BIT));
+#endif
+	/*
+	 * The dynamically-assigned ASIDs that get passed in are small
+	 * (<TLB_NR_DYN_ASIDS).  They never have the high switch bit set,
+	 * so do not bother to clear it.
+	 *
+	 * If PCID is on, ASID-aware code paths put the ASID+1 into the
+	 * PCID bits.  This serves two purposes.  It prevents a nasty
+	 * situation in which PCID-unaware code saves CR3, loads some other
+	 * value (with PCID == 0), and then restores CR3, thus corrupting
+	 * the TLB for ASID 0 if the saved ASID was nonzero.  It also means
+	 * that any bugs involving loading a PCID-enabled CR3 with
+	 * CR4.PCIDE off will trigger deterministically.
+	 */
+	return asid + 1;
 }
 
-/* Flush all mappings, including globals, for all PCIDs. */
-static inline void invpcid_flush_all(void)
+/*
+ * Given @asid, compute uPCID
+ */
+static inline u16 user_pcid(u16 asid)
 {
-	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
+	u16 ret = kern_pcid(asid);
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	ret |= 1 << X86_CR3_PTI_SWITCH_BIT;
+#endif
+	return ret;
 }
 
-/* Flush all mappings for all PCIDs except globals. */
-static inline void invpcid_flush_all_nonglobals(void)
+struct pgd_t;
+static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
 {
-	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
+	if (static_cpu_has(X86_FEATURE_PCID)) {
+		return __sme_pa(pgd) | kern_pcid(asid);
+	} else {
+		VM_WARN_ON_ONCE(asid != 0);
+		return __sme_pa(pgd);
+	}
 }
 
-static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
+static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
 {
-	u64 new_tlb_gen;
-
-	/*
-	 * Bump the generation count.  This also serves as a full barrier
-	 * that synchronizes with switch_mm(): callers are required to order
-	 * their read of mm_cpumask after their writes to the paging
-	 * structures.
-	 */
-	smp_mb__before_atomic();
-	new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
-	smp_mb__after_atomic();
-
-	return new_tlb_gen;
+	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
+	VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID));
+	return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
 }
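A worked example tying the three identifier spaces together, assuming PTI is compiled in (so one PCID bit is consumed and X86_CR3_PTI_SWITCH_BIT == 11) and the CPU has X86_FEATURE_PCID:

	/* asid == 1 (a dynamic ASID):                                     */
	/*   MAX_ASID_AVAILABLE        == (1 << 11) - 2 == 2046            */
	/*   kern_pcid(1)              == 2                (ASID + 1)      */
	/*   user_pcid(1)              == 2 | (1 << 11) == 2050            */
	/*   build_cr3(pgd, 1)         == __sme_pa(pgd) | 2                */
	/*   build_cr3_noflush(pgd, 1) == __sme_pa(pgd) | 2 | CR3_NOFLUSH  */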
 
 #ifdef CONFIG_PARAVIRT
@@ -99,12 +159,6 @@ static inline bool tlb_defer_switch_to_init_mm(void)
 	return !static_cpu_has(X86_FEATURE_PCID);
 }
 
-/*
- * 6 because 6 should be plenty and struct tlb_state will fit in
- * two cache lines.
- */
-#define TLB_NR_DYN_ASIDS 6
-
 struct tlb_context {
 	u64 ctx_id;
 	u64 tlb_gen;
@@ -138,6 +192,24 @@ struct tlb_state {
 	 */
 	bool is_lazy;
 
+	/*
+	 * If set we changed the page tables in such a way that we
+	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
+	 * This tells us to go invalidate all the non-loaded ctxs[]
+	 * on the next context switch.
+	 *
+	 * The current ctx was kept up-to-date as it ran and does not
+	 * need to be invalidated.
+	 */
+	bool invalidate_other;
+
+	/*
+	 * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
+	 * the corresponding user PCID needs a flush next time we
+	 * switch to it; see SWITCH_TO_USER_CR3.
+	 */
+	unsigned short user_pcid_flush_mask;
+
 	/*
 	 * Access to this CR4 shadow and to H/W CR4 is protected by
 	 * disabling interrupts when modifying either one.
@@ -218,6 +290,14 @@ static inline unsigned long cr4_read_shadow(void)
 	return this_cpu_read(cpu_tlbstate.cr4);
 }
 
+/*
+ * Mark all other ASIDs as invalid, preserving the current one.
+ */
+static inline void invalidate_other_asid(void)
+{
+	this_cpu_write(cpu_tlbstate.invalidate_other, true);
+}
+
 /*
  * Save some of cr4 feature set we're using (e.g.  Pentium 4MB
  * enable and PPro Global page enable), so that any CPU's that boot
@@ -237,37 +317,63 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
 
 extern void initialize_tlbstate_and_flush(void);
 
-static inline void __native_flush_tlb(void)
+/*
+ * Given an ASID, flush the corresponding user ASID.  We can delay this
+ * until the next time we switch to it.
+ *
+ * See SWITCH_TO_USER_CR3.
+ */
+static inline void invalidate_user_asid(u16 asid)
 {
+	/* There is no user ASID if address space separation is off */
+	if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
+		return;
+
 	/*
-	 * If current->mm == NULL then we borrow a mm which may change during a
-	 * task switch and therefore we must not be preempted while we write CR3
-	 * back:
+	 * We only have a single ASID if PCID is off and the CR3
+	 * write will have flushed it.
 	 */
-	preempt_disable();
-	native_write_cr3(__native_read_cr3());
-	preempt_enable();
+	if (!cpu_feature_enabled(X86_FEATURE_PCID))
+		return;
+
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return;
+
+	__set_bit(kern_pcid(asid),
+		  (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
 }
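invalidate_user_asid() only records that the user half of this ASID is stale; the flush itself happens the next time the entry code switches to the user CR3. A rough C rendering of that consuming side, which in the real series lives in the SWITCH_TO_USER_CR3 assembly (the helper below is illustrative only):

	static inline bool user_asid_needs_flush(u16 asid)
	{
		/* Test-and-clear the bit set by invalidate_user_asid() above. */
		return __test_and_clear_bit(kern_pcid(asid),
			(unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
	}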
 
-static inline void __native_flush_tlb_global_irq_disabled(void)
+/*
+ * flush the entire current user mapping
+ */
+static inline void __native_flush_tlb(void)
 {
-	unsigned long cr4;
+	/*
+	 * Preemption or interrupts must be disabled to protect the access
+	 * to the per CPU variable and to prevent being preempted between
+	 * read_cr3() and write_cr3().
+	 */
+	WARN_ON_ONCE(preemptible());
 
-	cr4 = this_cpu_read(cpu_tlbstate.cr4);
-	/* clear PGE */
-	native_write_cr4(cr4 & ~X86_CR4_PGE);
-	/* write old PGE again and flush TLBs */
-	native_write_cr4(cr4);
+	invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
+
+	/* If current->mm == NULL then the read_cr3() "borrows" an mm */
+	native_write_cr3(__native_read_cr3());
 }
 
+/*
+ * flush everything
+ */
 static inline void __native_flush_tlb_global(void)
 {
-	unsigned long flags;
+	unsigned long cr4, flags;
 
 	if (static_cpu_has(X86_FEATURE_INVPCID)) {
 		/*
 		 * Using INVPCID is considerably faster than a pair of writes
 		 * to CR4 sandwiched inside an IRQ flag save/restore.
+		 *
+		 * Note, this works with CR4.PCIDE=0 or 1.
 		 */
 		invpcid_flush_all();
 		return;
@@ -280,36 +386,69 @@ static inline void __native_flush_tlb_global(void)
 	 */
 	raw_local_irq_save(flags);
 
-	__native_flush_tlb_global_irq_disabled();
+	cr4 = this_cpu_read(cpu_tlbstate.cr4);
+	/* toggle PGE */
+	native_write_cr4(cr4 ^ X86_CR4_PGE);
+	/* write old PGE again and flush TLBs */
+	native_write_cr4(cr4);
 
 	raw_local_irq_restore(flags);
 }
 
+/*
+ * flush one page in the user mapping
+ */
 static inline void __native_flush_tlb_single(unsigned long addr)
 {
+	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+
 	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return;
+
+	/*
+	 * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
+	 * Just use invalidate_user_asid() in case we are called early.
+	 */
+	if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
+		invalidate_user_asid(loaded_mm_asid);
+	else
+		invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
 }
 
+/*
+ * flush everything
+ */
 static inline void __flush_tlb_all(void)
 {
-	if (boot_cpu_has(X86_FEATURE_PGE))
+	if (boot_cpu_has(X86_FEATURE_PGE)) {
 		__flush_tlb_global();
-	else
+	} else {
+		/*
+		 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
+		 */
 		__flush_tlb();
-
-	/*
-	 * Note: if we somehow had PCID but not PGE, then this wouldn't work --
-	 * we'd end up flushing kernel translations for the current ASID but
-	 * we might fail to flush kernel translations for other cached ASIDs.
-	 *
-	 * To avoid this issue, we force PCID off if PGE is off.
-	 */
+	}
 }
 
+/*
+ * flush one page in the kernel mapping
+ */
 static inline void __flush_tlb_one(unsigned long addr)
 {
 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
 	__flush_tlb_single(addr);
+
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return;
+
+	/*
+	 * __flush_tlb_single() will have cleared the TLB entry for this ASID,
+	 * but since the kernel address space is replicated across all of
+	 * them, we must also invalidate all the other ASIDs.
+	 */
+	invalidate_other_asid();
 }
 
 #define TLB_FLUSH_ALL	-1UL
@@ -370,6 +509,17 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
 void native_flush_tlb_others(const struct cpumask *cpumask,
 			     const struct flush_tlb_info *info);
 
+static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
+{
+	/*
+	 * Bump the generation count.  This also serves as a full barrier
+	 * that synchronizes with switch_mm(): callers are required to order
+	 * their read of mm_cpumask after their writes to the paging
+	 * structures.
+	 */
+	return atomic64_inc_return(&mm->context.tlb_gen);
+}
+
 static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
 					struct mm_struct *mm)
 {
diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
index 84b9ec0c1bc0867795c3a2d327f3b8831bf4f8ba..22647a642e98c41508dab9976a1ff4665b97a4d2 100644
--- a/arch/x86/include/asm/trace/irq_vectors.h
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -283,34 +283,34 @@ TRACE_EVENT(vector_alloc_managed,
 DECLARE_EVENT_CLASS(vector_activate,
 
 	TP_PROTO(unsigned int irq, bool is_managed, bool can_reserve,
-		 bool early),
+		 bool reserve),
 
-	TP_ARGS(irq, is_managed, can_reserve, early),
+	TP_ARGS(irq, is_managed, can_reserve, reserve),
 
 	TP_STRUCT__entry(
 		__field(	unsigned int,	irq		)
 		__field(	bool,		is_managed	)
 		__field(	bool,		can_reserve	)
-		__field(	bool,		early		)
+		__field(	bool,		reserve		)
 	),
 
 	TP_fast_assign(
 		__entry->irq		= irq;
 		__entry->is_managed	= is_managed;
 		__entry->can_reserve	= can_reserve;
-		__entry->early		= early;
+		__entry->reserve	= reserve;
 	),
 
-	TP_printk("irq=%u is_managed=%d can_reserve=%d early=%d",
+	TP_printk("irq=%u is_managed=%d can_reserve=%d reserve=%d",
 		  __entry->irq, __entry->is_managed, __entry->can_reserve,
-		  __entry->early)
+		  __entry->reserve)
 );
 
 #define DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(name)				\
 DEFINE_EVENT_FN(vector_activate, name,					\
 	TP_PROTO(unsigned int irq, bool is_managed,			\
-		 bool can_reserve, bool early),				\
-	TP_ARGS(irq, is_managed, can_reserve, early), NULL, NULL);	\
+		 bool can_reserve, bool reserve),			\
+	TP_ARGS(irq, is_managed, can_reserve, reserve), NULL, NULL);	\
 
 DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_activate);
 DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_deactivate);
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 1fadd310ff680ece697fa65a8db410c380a8547e..31051f35cbb768e452c4f76a60c5415a45f572e7 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -75,7 +75,6 @@ dotraplinkage void do_segment_not_present(struct pt_regs *, long);
 dotraplinkage void do_stack_segment(struct pt_regs *, long);
 #ifdef CONFIG_X86_64
 dotraplinkage void do_double_fault(struct pt_regs *, long);
-asmlinkage struct pt_regs *sync_regs(struct pt_regs *);
 #endif
 dotraplinkage void do_general_protection(struct pt_regs *, long);
 dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index e9cc6fe1fc6f953c38ddcc61fcf06fd90d72ab04..c1688c2d0a128f063053697dc60bcbfbca509765 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -7,6 +7,9 @@
 #include <asm/ptrace.h>
 #include <asm/stacktrace.h>
 
+#define IRET_FRAME_OFFSET (offsetof(struct pt_regs, ip))
+#define IRET_FRAME_SIZE   (sizeof(struct pt_regs) - IRET_FRAME_OFFSET)
+
 struct unwind_state {
 	struct stack_info stack_info;
 	unsigned long stack_mask;
@@ -52,6 +55,10 @@ void unwind_start(struct unwind_state *state, struct task_struct *task,
 }
 
 #if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER)
+/*
+ * WARNING: The entire pt_regs may not be safe to dereference.  In some cases,
+ * only the iret frame registers are accessible.  Use with caution!
+ */
 static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
 {
 	if (unwind_done(state))
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index d9a7c659009c94b2ee04b4ddcc94e5874cc403b0..b986b2ca688a0e4fa24ace613c7e465e909723fe 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -7,6 +7,7 @@
 
 #ifdef CONFIG_X86_VSYSCALL_EMULATION
 extern void map_vsyscall(void);
+extern void set_vsyscall_pgtable_user_bits(pgd_t *root);
 
 /*
  * Called on instruction fetch fault in vsyscall page.
diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h
index 7e1e730396ae08f5a267adaccf0c3ba46448f780..bcba3c643e63dced1c873ee5e1cdbfdd5d307928 100644
--- a/arch/x86/include/uapi/asm/processor-flags.h
+++ b/arch/x86/include/uapi/asm/processor-flags.h
@@ -78,7 +78,12 @@
 #define X86_CR3_PWT		_BITUL(X86_CR3_PWT_BIT)
 #define X86_CR3_PCD_BIT		4 /* Page Cache Disable */
 #define X86_CR3_PCD		_BITUL(X86_CR3_PCD_BIT)
-#define X86_CR3_PCID_MASK	_AC(0x00000fff,UL) /* PCID Mask */
+
+#define X86_CR3_PCID_BITS	12
+#define X86_CR3_PCID_MASK	(_AC((1UL << X86_CR3_PCID_BITS) - 1, UL))
+
+#define X86_CR3_PCID_NOFLUSH_BIT 63 /* Preserve old PCID */
+#define X86_CR3_PCID_NOFLUSH    _BITULL(X86_CR3_PCID_NOFLUSH_BIT)
 
 /*
  * Intel CPU features in CR4
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 6e272f3ea984a220975d1b7c144867840fb867a9..880441f2414610298002c34652b96e571772e08f 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2626,11 +2626,13 @@ static int __init apic_set_verbosity(char *arg)
 		apic_verbosity = APIC_DEBUG;
 	else if (strcmp("verbose", arg) == 0)
 		apic_verbosity = APIC_VERBOSE;
+#ifdef CONFIG_X86_64
 	else {
 		pr_warning("APIC Verbosity level %s not recognised"
 			" use apic=verbose or apic=debug\n", arg);
 		return -EINVAL;
 	}
+#endif
 
 	return 0;
 }
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index aa85690e9b6416b797db8129c8f1cbd7606bf687..25a87028cb3fe9bb4a6e9eba757d50cd49f15620 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -151,7 +151,7 @@ static struct apic apic_flat __ro_after_init = {
 	.apic_id_valid			= default_apic_id_valid,
 	.apic_id_registered		= flat_apic_id_registered,
 
-	.irq_delivery_mode		= dest_LowestPrio,
+	.irq_delivery_mode		= dest_Fixed,
 	.irq_dest_mode			= 1, /* logical */
 
 	.disable_esr			= 0,
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index 7b659c4480c91a680466cc961cee68754c633ada..5078b5ce63a7aaf4569368e6447c9a4423271109 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -110,7 +110,7 @@ struct apic apic_noop __ro_after_init = {
 	.apic_id_valid			= default_apic_id_valid,
 	.apic_id_registered		= noop_apic_id_registered,
 
-	.irq_delivery_mode		= dest_LowestPrio,
+	.irq_delivery_mode		= dest_Fixed,
 	/* logical delivery broadcast to all CPUs: */
 	.irq_dest_mode			= 1,
 
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 201579dc52428edb5c3989102a432c14799d1e1f..8a79634214600ab02076cfb601dac780996b6f3f 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2988,7 +2988,7 @@ void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
 }
 
 int mp_irqdomain_activate(struct irq_domain *domain,
-			  struct irq_data *irq_data, bool early)
+			  struct irq_data *irq_data, bool reserve)
 {
 	unsigned long flags;
 
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index 9b18be76442236eab971cab86cbf366f9e1e134f..ce503c99f5c4cd67fddb5fafdda7e3e64b8e7690 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -39,17 +39,13 @@ static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
 		((apic->irq_dest_mode == 0) ?
 			MSI_ADDR_DEST_MODE_PHYSICAL :
 			MSI_ADDR_DEST_MODE_LOGICAL) |
-		((apic->irq_delivery_mode != dest_LowestPrio) ?
-			MSI_ADDR_REDIRECTION_CPU :
-			MSI_ADDR_REDIRECTION_LOWPRI) |
+		MSI_ADDR_REDIRECTION_CPU |
 		MSI_ADDR_DEST_ID(cfg->dest_apicid);
 
 	msg->data =
 		MSI_DATA_TRIGGER_EDGE |
 		MSI_DATA_LEVEL_ASSERT |
-		((apic->irq_delivery_mode != dest_LowestPrio) ?
-			MSI_DATA_DELIVERY_FIXED :
-			MSI_DATA_DELIVERY_LOWPRI) |
+		MSI_DATA_DELIVERY_FIXED |
 		MSI_DATA_VECTOR(cfg->vector);
 }
 
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index fa22017de806504b79593ead951bffe4e5d5421b..02e8acb134f856faa0030bf1c007dc3ac1bd9ca3 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -105,7 +105,7 @@ static struct apic apic_default __ro_after_init = {
 	.apic_id_valid			= default_apic_id_valid,
 	.apic_id_registered		= default_apic_id_registered,
 
-	.irq_delivery_mode		= dest_LowestPrio,
+	.irq_delivery_mode		= dest_Fixed,
 	/* logical delivery broadcast to all CPUs: */
 	.irq_dest_mode			= 1,
 
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 750449152b04b4feed3c98ddc772db91f003f818..f8b03bb8e72560a436dbad677ddeed30f19531cf 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -184,6 +184,7 @@ static void reserve_irq_vector_locked(struct irq_data *irqd)
 	irq_matrix_reserve(vector_matrix);
 	apicd->can_reserve = true;
 	apicd->has_reserved = true;
+	irqd_set_can_reserve(irqd);
 	trace_vector_reserve(irqd->irq, 0);
 	vector_assign_managed_shutdown(irqd);
 }
@@ -368,8 +369,18 @@ static int activate_reserved(struct irq_data *irqd)
 	int ret;
 
 	ret = assign_irq_vector_any_locked(irqd);
-	if (!ret)
+	if (!ret) {
 		apicd->has_reserved = false;
+		/*
+		 * Core might have disabled reservation mode after
+		 * allocating the irq descriptor. Ideally this should
+		 * happen before allocation time, but that would require
+		 * completely convoluted ways of transporting that
+		 * information.
+		 */
+		if (!irqd_can_reserve(irqd))
+			apicd->can_reserve = false;
+	}
 	return ret;
 }
 
@@ -398,21 +409,21 @@ static int activate_managed(struct irq_data *irqd)
 }
 
 static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
-			       bool early)
+			       bool reserve)
 {
 	struct apic_chip_data *apicd = apic_chip_data(irqd);
 	unsigned long flags;
 	int ret = 0;
 
 	trace_vector_activate(irqd->irq, apicd->is_managed,
-			      apicd->can_reserve, early);
+			      apicd->can_reserve, reserve);
 
 	/* Nothing to do for fixed assigned vectors */
 	if (!apicd->can_reserve && !apicd->is_managed)
 		return 0;
 
 	raw_spin_lock_irqsave(&vector_lock, flags);
-	if (early || irqd_is_managed_and_shutdown(irqd))
+	if (reserve || irqd_is_managed_and_shutdown(irqd))
 		vector_assign_managed_shutdown(irqd);
 	else if (apicd->is_managed)
 		ret = activate_managed(irqd);
@@ -478,6 +489,7 @@ static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
 	} else {
 		/* Release the vector */
 		apicd->can_reserve = true;
+		irqd_set_can_reserve(irqd);
 		clear_irq_vector(irqd);
 		realloc = true;
 	}
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 622f13ca8a943c270e70df6100561ce4c56d7db2..8b04234e010b2616e42bff6cbeabdc3c2087c13c 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -184,7 +184,7 @@ static struct apic apic_x2apic_cluster __ro_after_init = {
 	.apic_id_valid			= x2apic_apic_id_valid,
 	.apic_id_registered		= x2apic_apic_id_registered,
 
-	.irq_delivery_mode		= dest_LowestPrio,
+	.irq_delivery_mode		= dest_Fixed,
 	.irq_dest_mode			= 1, /* logical */
 
 	.disable_esr			= 0,
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 8ea78275480dafeb702e11ba73364cd9e7c52f21..76417a9aab73c3f7e3376261eb9ec592b16adf3b 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -17,6 +17,7 @@
 #include <asm/sigframe.h>
 #include <asm/bootparam.h>
 #include <asm/suspend.h>
+#include <asm/tlbflush.h>
 
 #ifdef CONFIG_XEN
 #include <xen/interface/xen.h>
@@ -93,4 +94,13 @@ void common(void) {
 
 	BLANK();
 	DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
+
+	/* TLB state for the entry code */
+	OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask);
+
+	/* Layout info for cpu_entry_area */
+	OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);
+	OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
+	OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page);
+	DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack));
 }
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index dedf428b20b68b0a4748fc1ac3032193c9121362..fa1261eefa16e73cedf27aadb878753be693f919 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -47,13 +47,8 @@ void foo(void)
 	BLANK();
 
 	/* Offset from the sysenter stack to tss.sp0 */
-	DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
-	       offsetofend(struct tss_struct, SYSENTER_stack));
-
-	/* Offset from cpu_tss to SYSENTER_stack */
-	OFFSET(CPU_TSS_SYSENTER_stack, tss_struct, SYSENTER_stack);
-	/* Size of SYSENTER_stack */
-	DEFINE(SIZEOF_SYSENTER_stack, sizeof(((struct tss_struct *)0)->SYSENTER_stack));
+	DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) -
+	       offsetofend(struct cpu_entry_area, entry_stack_page.stack));
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 	BLANK();
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 630212fa9b9da3f0498fc30d4c193c5926c43abb..bf51e51d808dd8914abd3b4bca69b37ce3ec023b 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -23,6 +23,9 @@ int main(void)
 #ifdef CONFIG_PARAVIRT
 	OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
 	OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
+#ifdef CONFIG_DEBUG_ENTRY
+	OFFSET(PV_IRQ_save_fl, pv_irq_ops, save_fl);
+#endif
 	BLANK();
 #endif
 
@@ -63,6 +66,7 @@ int main(void)
 
 	OFFSET(TSS_ist, tss_struct, x86_tss.ist);
 	OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
+	OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
 	BLANK();
 
 #ifdef CONFIG_CC_STACKPROTECTOR
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index fa998ca8aa5aa5b4899dbe8a57c5b543f927009e..c47de4ebf63a3e84a64511662c08d5b20faa94db 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -476,8 +476,8 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c)
 	return NULL;		/* Not found */
 }
 
-__u32 cpu_caps_cleared[NCAPINTS];
-__u32 cpu_caps_set[NCAPINTS];
+__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
+__u32 cpu_caps_set[NCAPINTS + NBUGINTS];
 
 void load_percpu_segment(int cpu)
 {
@@ -490,28 +490,23 @@ void load_percpu_segment(int cpu)
 	load_stack_canary_segment();
 }
 
-/* Setup the fixmap mapping only once per-processor */
-static inline void setup_fixmap_gdt(int cpu)
-{
-#ifdef CONFIG_X86_64
-	/* On 64-bit systems, we use a read-only fixmap GDT. */
-	pgprot_t prot = PAGE_KERNEL_RO;
-#else
-	/*
-	 * On native 32-bit systems, the GDT cannot be read-only because
-	 * our double fault handler uses a task gate, and entering through
-	 * a task gate needs to change an available TSS to busy.  If the GDT
-	 * is read-only, that will triple fault.
-	 *
-	 * On Xen PV, the GDT must be read-only because the hypervisor requires
-	 * it.
-	 */
-	pgprot_t prot = boot_cpu_has(X86_FEATURE_XENPV) ?
-		PAGE_KERNEL_RO : PAGE_KERNEL;
+#ifdef CONFIG_X86_32
+/* The 32-bit entry code needs to find cpu_entry_area. */
+DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
 #endif
 
-	__set_fixmap(get_cpu_gdt_ro_index(cpu), get_cpu_gdt_paddr(cpu), prot);
-}
+#ifdef CONFIG_X86_64
+/*
+ * Special IST stacks which the CPU switches to when it calls
+ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+ * limit), all of them are 4K, except the debug stack which
+ * is 8K.
+ */
+static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
+	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
+};
+#endif
 
 /* Load the original GDT from the per-cpu structure */
 void load_direct_gdt(int cpu)
@@ -747,7 +742,7 @@ static void apply_forced_caps(struct cpuinfo_x86 *c)
 {
 	int i;
 
-	for (i = 0; i < NCAPINTS; i++) {
+	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
 		c->x86_capability[i] &= ~cpu_caps_cleared[i];
 		c->x86_capability[i] |= cpu_caps_set[i];
 	}
@@ -927,6 +922,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	}
 
 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
+
+	/* Assume for now that ALL x86 CPUs are insecure */
+	setup_force_cpu_bug(X86_BUG_CPU_INSECURE);
+
 	fpu__init_system(c);
 
 #ifdef CONFIG_X86_32
@@ -1250,7 +1249,7 @@ void enable_sep_cpu(void)
 		return;
 
 	cpu = get_cpu();
-	tss = &per_cpu(cpu_tss, cpu);
+	tss = &per_cpu(cpu_tss_rw, cpu);
 
 	/*
 	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
@@ -1259,11 +1258,7 @@ void enable_sep_cpu(void)
 
 	tss->x86_tss.ss1 = __KERNEL_CS;
 	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
-
-	wrmsr(MSR_IA32_SYSENTER_ESP,
-	      (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
-	      0);
-
+	wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
 	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
 
 	put_cpu();
@@ -1357,25 +1352,22 @@ DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
 
-/*
- * Special IST stacks which the CPU switches to when it calls
- * an IST-marked descriptor entry. Up to 7 stacks (hardware
- * limit), all of them are 4K, except the debug stack which
- * is 8K.
- */
-static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
-	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
-	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
-};
-
-static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
-	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
-
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
 {
+	extern char _entry_trampoline[];
+	extern char entry_SYSCALL_64_trampoline[];
+
+	int cpu = smp_processor_id();
+	unsigned long SYSCALL64_entry_trampoline =
+		(unsigned long)get_cpu_entry_area(cpu)->entry_trampoline +
+		(entry_SYSCALL_64_trampoline - _entry_trampoline);
+
 	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
-	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
+	if (static_cpu_has(X86_FEATURE_PTI))
+		wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline);
+	else
+		wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
 
 #ifdef CONFIG_IA32_EMULATION
 	wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
@@ -1386,7 +1378,7 @@ void syscall_init(void)
 	 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
 	 */
 	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1));
 	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
 #else
 	wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
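The MSR_LSTAR computation above rebases entry_SYSCALL_64_trampoline from its link-time address onto the per-cpu cpu_entry_area alias of the trampoline page. The same arithmetic works for any symbol inside that page; a hedged helper showing the pattern (the helper name is illustrative):

	static unsigned long entry_trampoline_va(int cpu, const char *sym)
	{
		extern char _entry_trampoline[];

		/* Offset of 'sym' within the trampoline page, rebased onto the
		 * cpu_entry_area alias of that page for this CPU. */
		return (unsigned long)get_cpu_entry_area(cpu)->entry_trampoline +
		       (sym - _entry_trampoline);
	}

With such a helper the PTI branch would read wrmsrl(MSR_LSTAR, entry_trampoline_va(cpu, entry_SYSCALL_64_trampoline)).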
@@ -1530,7 +1522,7 @@ void cpu_init(void)
 	if (cpu)
 		load_ucode_ap();
 
-	t = &per_cpu(cpu_tss, cpu);
+	t = &per_cpu(cpu_tss_rw, cpu);
 	oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
@@ -1569,7 +1561,7 @@ void cpu_init(void)
 	 * set up and load the per-CPU TSS
 	 */
 	if (!oist->ist[0]) {
-		char *estacks = per_cpu(exception_stacks, cpu);
+		char *estacks = get_cpu_entry_area(cpu)->exception_stacks;
 
 		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
 			estacks += exception_stack_sizes[v];
@@ -1580,7 +1572,7 @@ void cpu_init(void)
 		}
 	}
 
-	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+	t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
 
 	/*
 	 * <= is required because the CPU will access up to
@@ -1596,11 +1588,12 @@ void cpu_init(void)
 	enter_lazy_tlb(&init_mm, me);
 
 	/*
-	 * Initialize the TSS.  Don't bother initializing sp0, as the initial
-	 * task never enters user mode.
+	 * Initialize the TSS.  sp0 points to the entry trampoline stack
+	 * regardless of what task is running.
 	 */
-	set_tss_desc(cpu, t);
+	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
 	load_TR_desc();
+	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
 
 	load_mm_ldt(&init_mm);
 
@@ -1612,7 +1605,6 @@ void cpu_init(void)
 	if (is_uv_system())
 		uv_cpu_init();
 
-	setup_fixmap_gdt(cpu);
 	load_fixmap_gdt(cpu);
 }
 
@@ -1622,7 +1614,7 @@ void cpu_init(void)
 {
 	int cpu = smp_processor_id();
 	struct task_struct *curr = current;
-	struct tss_struct *t = &per_cpu(cpu_tss, cpu);
+	struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);
 
 	wait_for_master_cpu(cpu);
 
@@ -1657,12 +1649,12 @@ void cpu_init(void)
 	 * Initialize the TSS.  Don't bother initializing sp0, as the initial
 	 * task never enters user mode.
 	 */
-	set_tss_desc(cpu, t);
+	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
 	load_TR_desc();
 
 	load_mm_ldt(&init_mm);
 
-	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+	t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
 
 #ifdef CONFIG_DOUBLEFAULT
 	/* Set up doublefault TSS pointer in the GDT */
@@ -1674,7 +1666,6 @@ void cpu_init(void)
 
 	fpu__init_cpu();
 
-	setup_fixmap_gdt(cpu);
 	load_fixmap_gdt(cpu);
 }
 #endif
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 7dbcb7adf7975f7f29c38651c23c478ad315a34c..8ccdca6d3f9e9b876ee27f021ed8c021b1168220 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -565,15 +565,6 @@ static void print_ucode(struct ucode_cpu_info *uci)
 }
 #else
 
-/*
- * Flush global tlb. We only do this in x86_64 where paging has been enabled
- * already and PGE should be enabled as well.
- */
-static inline void flush_tlb_early(void)
-{
-	__native_flush_tlb_global_irq_disabled();
-}
-
 static inline void print_ucode(struct ucode_cpu_info *uci)
 {
 	struct microcode_intel *mc;
@@ -602,10 +593,6 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 	if (rev != mc->hdr.rev)
 		return -1;
 
-#ifdef CONFIG_X86_64
-	/* Flush global tlb. This is precaution. */
-	flush_tlb_early();
-#endif
 	uci->cpu_sig.rev = rev;
 
 	if (early)
diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
index 0e662c55ae902fedd5c78c1ed87a972b35a79856..0b8cedb20d6d92f2875a49292680c8cfecd5b044 100644
--- a/arch/x86/kernel/doublefault.c
+++ b/arch/x86/kernel/doublefault.c
@@ -50,25 +50,23 @@ static void doublefault_fn(void)
 		cpu_relax();
 }
 
-struct tss_struct doublefault_tss __cacheline_aligned = {
-	.x86_tss = {
-		.sp0		= STACK_START,
-		.ss0		= __KERNEL_DS,
-		.ldt		= 0,
-		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
-
-		.ip		= (unsigned long) doublefault_fn,
-		/* 0x2 bit is always set */
-		.flags		= X86_EFLAGS_SF | 0x2,
-		.sp		= STACK_START,
-		.es		= __USER_DS,
-		.cs		= __KERNEL_CS,
-		.ss		= __KERNEL_DS,
-		.ds		= __USER_DS,
-		.fs		= __KERNEL_PERCPU,
-
-		.__cr3		= __pa_nodebug(swapper_pg_dir),
-	}
+struct x86_hw_tss doublefault_tss __cacheline_aligned = {
+	.sp0		= STACK_START,
+	.ss0		= __KERNEL_DS,
+	.ldt		= 0,
+	.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
+
+	.ip		= (unsigned long) doublefault_fn,
+	/* 0x2 bit is always set */
+	.flags		= X86_EFLAGS_SF | 0x2,
+	.sp		= STACK_START,
+	.es		= __USER_DS,
+	.cs		= __KERNEL_CS,
+	.ss		= __KERNEL_DS,
+	.ds		= __USER_DS,
+	.fs		= __KERNEL_PERCPU,
+
+	.__cr3		= __pa_nodebug(swapper_pg_dir),
 };
 
 /* dummy for do_double_fault() call */
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index f13b4c00a5de4b7a7b36c40d27311672bcc9d05c..5fa110699ed275fe81dc9d8f678a2a2f8902017e 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -18,6 +18,7 @@
 #include <linux/nmi.h>
 #include <linux/sysfs.h>
 
+#include <asm/cpu_entry_area.h>
 #include <asm/stacktrace.h>
 #include <asm/unwind.h>
 
@@ -43,6 +44,24 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task,
 	return true;
 }
 
+bool in_entry_stack(unsigned long *stack, struct stack_info *info)
+{
+	struct entry_stack *ss = cpu_entry_stack(smp_processor_id());
+
+	void *begin = ss;
+	void *end = ss + 1;
+
+	if ((void *)stack < begin || (void *)stack >= end)
+		return false;
+
+	info->type	= STACK_TYPE_ENTRY;
+	info->begin	= begin;
+	info->end	= end;
+	info->next_sp	= NULL;
+
+	return true;
+}
+
 static void printk_stack_address(unsigned long address, int reliable,
 				 char *log_lvl)
 {
@@ -50,6 +69,28 @@ static void printk_stack_address(unsigned long address, int reliable,
 	printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
 }
 
+void show_iret_regs(struct pt_regs *regs)
+{
+	printk(KERN_DEFAULT "RIP: %04x:%pS\n", (int)regs->cs, (void *)regs->ip);
+	printk(KERN_DEFAULT "RSP: %04x:%016lx EFLAGS: %08lx", (int)regs->ss,
+		regs->sp, regs->flags);
+}
+
+static void show_regs_safe(struct stack_info *info, struct pt_regs *regs)
+{
+	if (on_stack(info, regs, sizeof(*regs)))
+		__show_regs(regs, 0);
+	else if (on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
+			  IRET_FRAME_SIZE)) {
+		/*
+		 * When an interrupt or exception occurs in entry code, the
+		 * full pt_regs might not have been saved yet.  In that case
+		 * just print the iret frame.
+		 */
+		show_iret_regs(regs);
+	}
+}
+
 void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 			unsigned long *stack, char *log_lvl)
 {
@@ -71,31 +112,35 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 	 * - task stack
 	 * - interrupt stack
 	 * - HW exception stacks (double fault, nmi, debug, mce)
+	 * - entry stack
 	 *
-	 * x86-32 can have up to three stacks:
+	 * x86-32 can have up to four stacks:
 	 * - task stack
 	 * - softirq stack
 	 * - hardirq stack
+	 * - entry stack
 	 */
 	for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
 		const char *stack_name;
 
-		/*
-		 * If we overflowed the task stack into a guard page, jump back
-		 * to the bottom of the usable stack.
-		 */
-		if (task_stack_page(task) - (void *)stack < PAGE_SIZE)
-			stack = task_stack_page(task);
-
-		if (get_stack_info(stack, task, &stack_info, &visit_mask))
-			break;
+		if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
+			/*
+			 * We weren't on a valid stack.  It's possible that
+			 * we overflowed a valid stack into a guard page.
+			 * See if the next page up is valid so that we can
+			 * generate some kind of backtrace if this happens.
+			 */
+			stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack);
+			if (get_stack_info(stack, task, &stack_info, &visit_mask))
+				break;
+		}
 
 		stack_name = stack_type_name(stack_info.type);
 		if (stack_name)
 			printk("%s <%s>\n", log_lvl, stack_name);
 
-		if (regs && on_stack(&stack_info, regs, sizeof(*regs)))
-			__show_regs(regs, 0);
+		if (regs)
+			show_regs_safe(&stack_info, regs);
 
 		/*
 		 * Scan the stack, printing any text addresses we find.  At the
@@ -119,7 +164,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 
 			/*
 			 * Don't print regs->ip again if it was already printed
-			 * by __show_regs() below.
+			 * by show_regs_safe() below.
 			 */
 			if (regs && stack == &regs->ip)
 				goto next;
@@ -155,8 +200,8 @@ next:
 
 			/* if the frame has entry regs, print them */
 			regs = unwind_get_entry_regs(&state);
-			if (regs && on_stack(&stack_info, regs, sizeof(*regs)))
-				__show_regs(regs, 0);
+			if (regs)
+				show_regs_safe(&stack_info, regs);
 		}
 
 		if (stack_name)
@@ -252,11 +297,13 @@ int __die(const char *str, struct pt_regs *regs, long err)
 	unsigned long sp;
 #endif
 	printk(KERN_DEFAULT
-	       "%s: %04lx [#%d]%s%s%s%s\n", str, err & 0xffff, ++die_counter,
+	       "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
 	       IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT"         : "",
 	       IS_ENABLED(CONFIG_SMP)     ? " SMP"             : "",
 	       debug_pagealloc_enabled()  ? " DEBUG_PAGEALLOC" : "",
-	       IS_ENABLED(CONFIG_KASAN)   ? " KASAN"           : "");
+	       IS_ENABLED(CONFIG_KASAN)   ? " KASAN"           : "",
+	       IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ?
+	       (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "");
 
 	if (notify_die(DIE_OOPS, str, regs, err,
 			current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
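
The guard-page fallback above relies on nothing more than rounding the overflowed stack pointer up to the next page boundary before retrying get_stack_info(). A minimal userspace sketch of that rounding, assuming a 4096-byte page and a made-up stack pointer value (this is only a model of the arithmetic, not kernel code):

#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	/* Hypothetical stack pointer that landed in a guard page. */
	unsigned long long sp = 0xffffc90000003f80ULL;

	printf("sp           : %#llx\n", sp);
	printf("next page up : %#llx\n", PAGE_ALIGN(sp));
	return 0;
}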
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index daefae83a3aa86c59602b75bd3e6734c6e3b1030..04170f63e3a1d567caac3deea641e014b7e10823 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -26,6 +26,9 @@ const char *stack_type_name(enum stack_type type)
 	if (type == STACK_TYPE_SOFTIRQ)
 		return "SOFTIRQ";
 
+	if (type == STACK_TYPE_ENTRY)
+		return "ENTRY_TRAMPOLINE";
+
 	return NULL;
 }
 
@@ -93,6 +96,9 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
 	if (task != current)
 		goto unknown;
 
+	if (in_entry_stack(stack, info))
+		goto recursion_check;
+
 	if (in_hardirq_stack(stack, info))
 		goto recursion_check;
 
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 88ce2ffdb110303502ad33e64d357d8af5afd8c6..563e28d14f2ca157178d9de3a139d8370aaf89fe 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -37,6 +37,15 @@ const char *stack_type_name(enum stack_type type)
 	if (type == STACK_TYPE_IRQ)
 		return "IRQ";
 
+	if (type == STACK_TYPE_ENTRY) {
+		/*
+		 * On 64-bit, we have a generic entry stack that we
+		 * use for all the kernel entry points, including
+		 * SYSENTER.
+		 */
+		return "ENTRY_TRAMPOLINE";
+	}
+
 	if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST)
 		return exception_stack_names[type - STACK_TYPE_EXCEPTION];
 
@@ -115,6 +124,9 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
 	if (in_irq_stack(stack, info))
 		goto recursion_check;
 
+	if (in_entry_stack(stack, info))
+		goto recursion_check;
+
 	goto unknown;
 
 recursion_check:
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 7dca675fe78db60c5d79bc450bbd14bfee35cfc2..04a625f0fcda322dab7c9d8459a19ee56b71c936 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -341,6 +341,27 @@ GLOBAL(early_recursion_flag)
 	.balign	PAGE_SIZE; \
 GLOBAL(name)
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+/*
+ * Each PGD needs to be 8k long and 8k aligned.  We do not
+ * ever go out to userspace with these, so we do not
+ * strictly *need* the second page, but this allows us to
+ * have a single set_pgd() implementation that does not
+ * need to worry about whether it has 4k or 8k to work
+ * with.
+ *
+ * This ensures PGDs are 8k long:
+ */
+#define PTI_USER_PGD_FILL	512
+/* This ensures they are 8k-aligned: */
+#define NEXT_PGD_PAGE(name) \
+	.balign 2 * PAGE_SIZE; \
+GLOBAL(name)
+#else
+#define NEXT_PGD_PAGE(name) NEXT_PAGE(name)
+#define PTI_USER_PGD_FILL	0
+#endif
+
 /* Automate the creation of 1 to 1 mapping pmd entries */
 #define PMDS(START, PERM, COUNT)			\
 	i = 0 ;						\
@@ -350,13 +371,14 @@ GLOBAL(name)
 	.endr
 
 	__INITDATA
-NEXT_PAGE(early_top_pgt)
+NEXT_PGD_PAGE(early_top_pgt)
 	.fill	511,8,0
 #ifdef CONFIG_X86_5LEVEL
 	.quad	level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
 #else
 	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
 #endif
+	.fill	PTI_USER_PGD_FILL,8,0
 
 NEXT_PAGE(early_dynamic_pgts)
 	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
@@ -364,13 +386,14 @@ NEXT_PAGE(early_dynamic_pgts)
 	.data
 
 #if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH)
-NEXT_PAGE(init_top_pgt)
+NEXT_PGD_PAGE(init_top_pgt)
 	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
 	.org    init_top_pgt + PGD_PAGE_OFFSET*8, 0
 	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
 	.org    init_top_pgt + PGD_START_KERNEL*8, 0
 	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
 	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
+	.fill	PTI_USER_PGD_FILL,8,0
 
 NEXT_PAGE(level3_ident_pgt)
 	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
@@ -381,8 +404,9 @@ NEXT_PAGE(level2_ident_pgt)
 	 */
 	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
 #else
-NEXT_PAGE(init_top_pgt)
+NEXT_PGD_PAGE(init_top_pgt)
 	.fill	512,8,0
+	.fill	PTI_USER_PGD_FILL,8,0
 #endif
 
 #ifdef CONFIG_X86_5LEVEL
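
The 2 * PAGE_SIZE alignment requested by NEXT_PGD_PAGE() is what makes the user half of the PGD trivially addressable: with the pair of pages 8k-aligned, the user copy of an entry can be reached by setting the PAGE_SIZE bit of the kernel entry's address, which is what kernel_to_user_pgdp() in the LDT changes below relies on. A standalone sketch of that address arithmetic (a simplified model with a hypothetical address, not the kernel helper itself):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

/* Model of the lookup; valid only because the PGD pair is 8k-aligned. */
static uintptr_t kernel_to_user_pgd_page(uintptr_t kernel_pgd_page)
{
	return kernel_pgd_page | PAGE_SIZE;
}

int main(void)
{
	uintptr_t kernel_pgd = 0x100000;	/* hypothetical 8k-aligned address */

	printf("kernel PGD page: %#lx\n", (unsigned long)kernel_pgd);
	printf("user   PGD page: %#lx\n",
	       (unsigned long)kernel_to_user_pgd_page(kernel_pgd));
	return 0;
}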
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 3feb648781c470a7a49ee26749712ba7da891fe9..2f723301eb58fc5ad0d6796b342446ae2ee0c9e6 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -67,7 +67,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
 	 * because the ->io_bitmap_max value must match the bitmap
 	 * contents:
 	 */
-	tss = &per_cpu(cpu_tss, get_cpu());
+	tss = &per_cpu(cpu_tss_rw, get_cpu());
 
 	if (turn_on)
 		bitmap_clear(t->io_bitmap_ptr, from, num);
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 49cfd9fe7589fa5ef2bef5d4a5d6431b7007836f..68e1867cca8045d0ed728ffc6b75a866c25484ed 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -219,18 +219,6 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 	/* high bit used in ret_from_ code  */
 	unsigned vector = ~regs->orig_ax;
 
-	/*
-	 * NB: Unlike exception entries, IRQ entries do not reliably
-	 * handle context tracking in the low-level entry code.  This is
-	 * because syscall entries execute briefly with IRQs on before
-	 * updating context tracking state, so we can take an IRQ from
-	 * kernel mode with CONTEXT_USER.  The low-level entry code only
-	 * updates the context if we came from user mode, so we won't
-	 * switch to CONTEXT_KERNEL.  We'll fix that once the syscall
-	 * code is cleaned up enough that we can cleanly defer enabling
-	 * IRQs.
-	 */
-
 	entering_irq();
 
 	/* entering_irq() tells RCU that we're not quiescent.  Check it. */
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 020efbf5786b35d343a8632cd14ac4f800465d9b..d86e344f5b3debfed504b72a7c0f83f36fe16387 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -57,10 +57,10 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 	if (regs->sp >= estack_top && regs->sp <= estack_bottom)
 		return;
 
-	WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx)\n",
+	WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx,ip:%pF)\n",
 		current->comm, curbase, regs->sp,
 		irq_stack_top, irq_stack_bottom,
-		estack_top, estack_bottom);
+		estack_top, estack_bottom, (void *)regs->ip);
 
 	if (sysctl_panic_on_stackoverflow)
 		panic("low stack detected by irq handler - check messages\n");
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 1c1eae9613406b14c3154065e1fd036f985a384c..26d713ecad34a847e650638d02442eda04379f5e 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -5,6 +5,11 @@
  * Copyright (C) 2002 Andi Kleen
  *
  * This handles calls from both 32bit and 64bit mode.
+ *
+ * Lock order:
+ *	context.ldt_usr_sem
+ *	  mmap_sem
+ *	    context.lock
  */
 
 #include <linux/errno.h>
@@ -19,6 +24,7 @@
 #include <linux/uaccess.h>
 
 #include <asm/ldt.h>
+#include <asm/tlb.h>
 #include <asm/desc.h>
 #include <asm/mmu_context.h>
 #include <asm/syscalls.h>
@@ -42,17 +48,15 @@ static void refresh_ldt_segments(void)
 #endif
 }
 
-/* context.lock is held for us, so we don't need any locking. */
+/* context.lock is held by the task which issued the smp function call */
 static void flush_ldt(void *__mm)
 {
 	struct mm_struct *mm = __mm;
-	mm_context_t *pc;
 
 	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
 		return;
 
-	pc = &mm->context;
-	set_ldt(pc->ldt->entries, pc->ldt->nr_entries);
+	load_mm_ldt(mm);
 
 	refresh_ldt_segments();
 }
@@ -89,25 +93,143 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
 		return NULL;
 	}
 
+	/* The new LDT isn't aliased for PTI yet. */
+	new_ldt->slot = -1;
+
 	new_ldt->nr_entries = num_entries;
 	return new_ldt;
 }
 
+/*
+ * If PTI is enabled, this maps the LDT into the kernelmode and
+ * usermode tables for the given mm.
+ *
+ * There is no corresponding unmap function.  Even if the LDT is freed, we
+ * leave the PTEs around until the slot is reused or the mm is destroyed.
+ * This is harmless: the LDT is always in ordinary memory, and no one will
+ * access the freed slot.
+ *
+ * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
+ * it useful, and the flush would slow down modify_ldt().
+ */
+static int
+map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
+{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	bool is_vmalloc, had_top_level_entry;
+	unsigned long va;
+	spinlock_t *ptl;
+	pgd_t *pgd;
+	int i;
+
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return 0;
+
+	/*
+	 * Any given ldt_struct should have map_ldt_struct() called at most
+	 * once.
+	 */
+	WARN_ON(ldt->slot != -1);
+
+	/*
+	 * Did we already have the top level entry allocated?  We can't
+	 * use pgd_none() for this because it doesn't do anything on
+	 * 4-level page table kernels.
+	 */
+	pgd = pgd_offset(mm, LDT_BASE_ADDR);
+	had_top_level_entry = (pgd->pgd != 0);
+
+	is_vmalloc = is_vmalloc_addr(ldt->entries);
+
+	for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
+		unsigned long offset = i << PAGE_SHIFT;
+		const void *src = (char *)ldt->entries + offset;
+		unsigned long pfn;
+		pte_t pte, *ptep;
+
+		va = (unsigned long)ldt_slot_va(slot) + offset;
+		pfn = is_vmalloc ? vmalloc_to_pfn(src) :
+			page_to_pfn(virt_to_page(src));
+		/*
+		 * Treat the PTI LDT range as a *userspace* range.
+		 * get_locked_pte() will allocate all needed pagetables
+		 * and account for them in this mm.
+		 */
+		ptep = get_locked_pte(mm, va, &ptl);
+		if (!ptep)
+			return -ENOMEM;
+		/*
+		 * Map it RO so the easy to find address is not a primary
+		 * target via some kernel interface which misses a
+		 * permission check.
+		 */
+		pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL));
+		set_pte_at(mm, va, ptep, pte);
+		pte_unmap_unlock(ptep, ptl);
+	}
+
+	if (mm->context.ldt) {
+		/*
+		 * We already had an LDT.  The top-level entry should already
+		 * have been allocated and synchronized with the usermode
+		 * tables.
+		 */
+		WARN_ON(!had_top_level_entry);
+		if (static_cpu_has(X86_FEATURE_PTI))
+			WARN_ON(!kernel_to_user_pgdp(pgd)->pgd);
+	} else {
+		/*
+		 * This is the first time we're mapping an LDT for this process.
+		 * Sync the pgd to the usermode tables.
+		 */
+		WARN_ON(had_top_level_entry);
+		if (static_cpu_has(X86_FEATURE_PTI)) {
+			WARN_ON(kernel_to_user_pgdp(pgd)->pgd);
+			set_pgd(kernel_to_user_pgdp(pgd), *pgd);
+		}
+	}
+
+	va = (unsigned long)ldt_slot_va(slot);
+	flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);
+
+	ldt->slot = slot;
+#endif
+	return 0;
+}
+
+static void free_ldt_pgtables(struct mm_struct *mm)
+{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	struct mmu_gather tlb;
+	unsigned long start = LDT_BASE_ADDR;
+	unsigned long end = start + (1UL << PGDIR_SHIFT);
+
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return;
+
+	tlb_gather_mmu(&tlb, mm, start, end);
+	free_pgd_range(&tlb, start, end, start, end);
+	tlb_finish_mmu(&tlb, start, end);
+#endif
+}
+
 /* After calling this, the LDT is immutable. */
 static void finalize_ldt_struct(struct ldt_struct *ldt)
 {
 	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
 }
 
-/* context.lock is held */
-static void install_ldt(struct mm_struct *current_mm,
-			struct ldt_struct *ldt)
+static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
 {
+	mutex_lock(&mm->context.lock);
+
 	/* Synchronizes with READ_ONCE in load_mm_ldt. */
-	smp_store_release(&current_mm->context.ldt, ldt);
+	smp_store_release(&mm->context.ldt, ldt);
 
-	/* Activate the LDT for all CPUs using current_mm. */
-	on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
+	/* Activate the LDT for all CPUs using current's mm. */
+	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
+
+	mutex_unlock(&mm->context.lock);
 }
 
 static void free_ldt_struct(struct ldt_struct *ldt)
@@ -124,27 +246,20 @@ static void free_ldt_struct(struct ldt_struct *ldt)
 }
 
 /*
- * we do not have to muck with descriptors here, that is
- * done in switch_mm() as needed.
+ * Called on fork from arch_dup_mmap(). Just copy the current LDT state;
+ * the new task is not running, so nothing can be installed.
  */
-int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
+int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
 {
 	struct ldt_struct *new_ldt;
-	struct mm_struct *old_mm;
 	int retval = 0;
 
-	mutex_init(&mm->context.lock);
-	old_mm = current->mm;
-	if (!old_mm) {
-		mm->context.ldt = NULL;
+	if (!old_mm)
 		return 0;
-	}
 
 	mutex_lock(&old_mm->context.lock);
-	if (!old_mm->context.ldt) {
-		mm->context.ldt = NULL;
+	if (!old_mm->context.ldt)
 		goto out_unlock;
-	}
 
 	new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
 	if (!new_ldt) {
@@ -156,6 +271,12 @@ int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
 	       new_ldt->nr_entries * LDT_ENTRY_SIZE);
 	finalize_ldt_struct(new_ldt);
 
+	retval = map_ldt_struct(mm, new_ldt, 0);
+	if (retval) {
+		free_ldt_pgtables(mm);
+		free_ldt_struct(new_ldt);
+		goto out_unlock;
+	}
 	mm->context.ldt = new_ldt;
 
 out_unlock:
@@ -174,13 +295,18 @@ void destroy_context_ldt(struct mm_struct *mm)
 	mm->context.ldt = NULL;
 }
 
+void ldt_arch_exit_mmap(struct mm_struct *mm)
+{
+	free_ldt_pgtables(mm);
+}
+
 static int read_ldt(void __user *ptr, unsigned long bytecount)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long entries_size;
 	int retval;
 
-	mutex_lock(&mm->context.lock);
+	down_read(&mm->context.ldt_usr_sem);
 
 	if (!mm->context.ldt) {
 		retval = 0;
@@ -209,7 +335,7 @@ static int read_ldt(void __user *ptr, unsigned long bytecount)
 	retval = bytecount;
 
 out_unlock:
-	mutex_unlock(&mm->context.lock);
+	up_read(&mm->context.ldt_usr_sem);
 	return retval;
 }
 
@@ -269,7 +395,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 			ldt.avl = 0;
 	}
 
-	mutex_lock(&mm->context.lock);
+	if (down_write_killable(&mm->context.ldt_usr_sem))
+		return -EINTR;
 
 	old_ldt       = mm->context.ldt;
 	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
@@ -286,12 +413,31 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 	new_ldt->entries[ldt_info.entry_number] = ldt;
 	finalize_ldt_struct(new_ldt);
 
+	/*
+	 * If we are using PTI, map the new LDT into the userspace pagetables.
+	 * If there is already an LDT, use the other slot so that other CPUs
+	 * will continue to use the old LDT until install_ldt() switches
+	 * them over to the new LDT.
+	 */
+	error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
+	if (error) {
+		/*
+		 * This can only fail for the first LDT setup. If an LDT is
+		 * already installed then the PTE page is already
+		 * populated. Mop up the half-populated page table.
+		 */
+		if (!WARN_ON_ONCE(old_ldt))
+			free_ldt_pgtables(mm);
+		free_ldt_struct(new_ldt);
+		goto out_unlock;
+	}
+
 	install_ldt(mm, new_ldt);
 	free_ldt_struct(old_ldt);
 	error = 0;
 
 out_unlock:
-	mutex_unlock(&mm->context.lock);
+	up_write(&mm->context.ldt_usr_sem);
 out:
 	return error;
 }
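
write_ldt() above picks the PTI alias slot as old_ldt ? !old_ldt->slot : 0, so the two slots simply alternate between successive LDTs and other CPUs keep a valid mapping until install_ldt() switches them over. A self-contained sketch of that selection, with a stripped-down ldt_struct that only carries the slot number (illustration only, not the kernel structure):

#include <stdio.h>

struct ldt_struct {
	int slot;	/* -1 means "not aliased for PTI yet" */
};

static int pick_slot(const struct ldt_struct *old_ldt)
{
	return old_ldt ? !old_ldt->slot : 0;
}

int main(void)
{
	struct ldt_struct ldts[3];
	struct ldt_struct *old = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		ldts[i].slot = pick_slot(old);	/* 0, 1, 0, ... */
		printf("LDT %d mapped into slot %d\n", i, ldts[i].slot);
		old = &ldts[i];
	}
	return 0;
}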
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 00bc751c861ce8fa42c1b93d39d029694f4e328d..edfede76868870e4cf86fed553377fd6bd0cac06 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -48,8 +48,6 @@ static void load_segments(void)
 		"\tmovl $"STR(__KERNEL_DS)",%%eax\n"
 		"\tmovl %%eax,%%ds\n"
 		"\tmovl %%eax,%%es\n"
-		"\tmovl %%eax,%%fs\n"
-		"\tmovl %%eax,%%gs\n"
 		"\tmovl %%eax,%%ss\n"
 		: : : "eax", "memory");
 #undef STR
@@ -232,8 +230,8 @@ void machine_kexec(struct kimage *image)
 	 * The gdt & idt are now invalid.
 	 * If you want to load them you must set up your own idt & gdt.
 	 */
-	set_gdt(phys_to_virt(0), 0);
 	idt_invalidate(phys_to_virt(0));
+	set_gdt(phys_to_virt(0), 0);
 
 	/* now call it */
 	image->start = relocate_kernel_ptr((unsigned long)image->head,
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index ac0be8283325edfdc2752f862b4c0cef208a931c..9edadabf04f66c657f8a29bb56fe994b2559d5cf 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -10,7 +10,6 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
-DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
 
 DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
@@ -60,7 +59,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		PATCH_SITE(pv_mmu_ops, read_cr2);
 		PATCH_SITE(pv_mmu_ops, read_cr3);
 		PATCH_SITE(pv_mmu_ops, write_cr3);
-		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
 		PATCH_SITE(pv_cpu_ops, wbinvd);
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
 		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index bb988a24db927d758f9120d45f90d1c160628790..aed9d94bd46f41bb049b8e0153a44a43d97e80b4 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -47,7 +47,7 @@
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss_rw) = {
 	.x86_tss = {
 		/*
 		 * .sp0 is only used when entering ring 0 from a lower
@@ -56,6 +56,16 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
 		 * Poison it.
 		 */
 		.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
+
+#ifdef CONFIG_X86_64
+		/*
+		 * .sp1 is cpu_current_top_of_stack.  The init task never
+		 * runs user code, but cpu_current_top_of_stack should still
+		 * be well defined before the first context switch.
+		 */
+		.sp1 = TOP_OF_INIT_STACK,
+#endif
+
 #ifdef CONFIG_X86_32
 		.ss0 = __KERNEL_DS,
 		.ss1 = __KERNEL_CS,
@@ -71,11 +81,8 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
 	  */
 	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
 #endif
-#ifdef CONFIG_X86_32
-	.SYSENTER_stack_canary	= STACK_END_MAGIC,
-#endif
 };
-EXPORT_PER_CPU_SYMBOL(cpu_tss);
+EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
 
 DEFINE_PER_CPU(bool, __tss_limit_invalid);
 EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
@@ -104,7 +111,7 @@ void exit_thread(struct task_struct *tsk)
 	struct fpu *fpu = &t->fpu;
 
 	if (bp) {
-		struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
+		struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());
 
 		t->io_bitmap_ptr = NULL;
 		clear_thread_flag(TIF_IO_BITMAP);
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 45bf0c5f93e15103060d67d5245756ab72ce8fe5..5224c609918416337b97440eb2d515d8052463ae 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -234,7 +234,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	struct fpu *prev_fpu = &prev->fpu;
 	struct fpu *next_fpu = &next->fpu;
 	int cpu = smp_processor_id();
-	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+	struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index eeeb34f85c250e8c01188b6d32cf5a62bd1af8a0..c754662320163107ca3a254362ce0e404a8d3c11 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -69,9 +69,8 @@ void __show_regs(struct pt_regs *regs, int all)
 	unsigned int fsindex, gsindex;
 	unsigned int ds, cs, es;
 
-	printk(KERN_DEFAULT "RIP: %04lx:%pS\n", regs->cs, (void *)regs->ip);
-	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx", regs->ss,
-		regs->sp, regs->flags);
+	show_iret_regs(regs);
+
 	if (regs->orig_ax != -1)
 		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
 	else
@@ -88,6 +87,9 @@ void __show_regs(struct pt_regs *regs, int all)
 	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
 	       regs->r13, regs->r14, regs->r15);
 
+	if (!all)
+		return;
+
 	asm("movl %%ds,%0" : "=r" (ds));
 	asm("movl %%cs,%0" : "=r" (cs));
 	asm("movl %%es,%0" : "=r" (es));
@@ -98,9 +100,6 @@ void __show_regs(struct pt_regs *regs, int all)
 	rdmsrl(MSR_GS_BASE, gs);
 	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
 
-	if (!all)
-		return;
-
 	cr0 = read_cr0();
 	cr2 = read_cr2();
 	cr3 = __read_cr3();
@@ -400,7 +399,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	struct fpu *prev_fpu = &prev->fpu;
 	struct fpu *next_fpu = &next->fpu;
 	int cpu = smp_processor_id();
-	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+	struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
 		     this_cpu_read(irq_count) != -1);
@@ -462,6 +461,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * Switch the PDA and FPU contexts.
 	 */
 	this_cpu_write(current_task, next_p);
+	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
 
 	/* Reload sp0. */
 	update_sp0(next_p);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 35cb20994e32d2bf05f0b1510ccc26cc7e7590a5..ed556d50d7ed6d71f03d202ef84a6b6b54e95a02 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -126,25 +126,16 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
 	spin_lock_irqsave(&rtc_lock, flags);
 	CMOS_WRITE(0xa, 0xf);
 	spin_unlock_irqrestore(&rtc_lock, flags);
-	local_flush_tlb();
-	pr_debug("1.\n");
 	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
 							start_eip >> 4;
-	pr_debug("2.\n");
 	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
 							start_eip & 0xf;
-	pr_debug("3.\n");
 }
 
 static inline void smpboot_restore_warm_reset_vector(void)
 {
 	unsigned long flags;
 
-	/*
-	 * Install writable page 0 entry to set BIOS data area.
-	 */
-	local_flush_tlb();
-
 	/*
 	 * Paranoid:  Set warm reset code and vector here back
 	 * to default values.
@@ -932,12 +923,8 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
 	initial_code = (unsigned long)start_secondary;
 	initial_stack  = idle->thread.sp;
 
-	/*
-	 * Enable the espfix hack for this CPU
-	*/
-#ifdef CONFIG_X86_ESPFIX64
+	/* Enable the espfix hack for this CPU */
 	init_espfix_ap(cpu);
-#endif
 
 	/* So we see what's up */
 	announce_cpu(cpu, apicid);
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 77835bc021c766744dd1976b4429141e3fb645fa..20161ef53537c760d858388295f57d21ae2f7346 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -164,8 +164,12 @@ int save_stack_trace_tsk_reliable(struct task_struct *tsk,
 {
 	int ret;
 
+	/*
+	 * If the task doesn't have a stack (e.g., a zombie), the stack is
+	 * "reliably" empty.
+	 */
 	if (!try_get_task_stack(tsk))
-		return -EINVAL;
+		return 0;
 
 	ret = __save_stack_trace_reliable(trace, tsk);
 
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 9a9c9b076955dd493c7de80b8d814a27dd0fceb7..a5b802a1221272402b344d75ccf94bcff4b69b38 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -93,17 +93,10 @@ static void set_tls_desc(struct task_struct *p, int idx,
 	cpu = get_cpu();
 
 	while (n-- > 0) {
-		if (LDT_empty(info) || LDT_zero(info)) {
+		if (LDT_empty(info) || LDT_zero(info))
 			memset(desc, 0, sizeof(*desc));
-		} else {
+		else
 			fill_ldt(desc, info);
-
-			/*
-			 * Always set the accessed bit so that the CPU
-			 * doesn't try to write to the (read-only) GDT.
-			 */
-			desc->type |= 1;
-		}
 		++info;
 		++desc;
 	}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 989514c94a55d8fa93a07192edd199be1a607bf8..446c9ef8cfc32b68d77d4429128b3c794b3fc069 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -51,6 +51,7 @@
 #include <asm/traps.h>
 #include <asm/desc.h>
 #include <asm/fpu/internal.h>
+#include <asm/cpu_entry_area.h>
 #include <asm/mce.h>
 #include <asm/fixmap.h>
 #include <asm/mach_traps.h>
@@ -348,23 +349,42 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 
 	/*
 	 * If IRET takes a non-IST fault on the espfix64 stack, then we
-	 * end up promoting it to a doublefault.  In that case, modify
-	 * the stack to make it look like we just entered the #GP
-	 * handler from user space, similar to bad_iret.
+	 * end up promoting it to a doublefault.  In that case, take
+	 * advantage of the fact that we're not using the normal (TSS.sp0)
+	 * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
+	 * and then modify our own IRET frame so that, when we return,
+	 * we land directly at the #GP(0) vector with the stack already
+	 * set up according to its expectations.
+	 *
+	 * The net result is that our #GP handler will think that we
+	 * entered from usermode with the bad user context.
 	 *
 	 * No need for ist_enter here because we don't use RCU.
 	 */
-	if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
+	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
 		regs->cs == __KERNEL_CS &&
 		regs->ip == (unsigned long)native_irq_return_iret)
 	{
-		struct pt_regs *normal_regs = task_pt_regs(current);
+		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
-		/* Fake a #GP(0) from userspace. */
-		memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
-		normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
+		/*
+		 * regs->sp points to the failing IRET frame on the
+		 * ESPFIX64 stack.  Copy it to the entry stack.  This fills
+		 * in gpregs->ss through gpregs->ip.
+		 *
+		 */
+		memmove(&gpregs->ip, (void *)regs->sp, 5*8);
+		gpregs->orig_ax = 0;  /* Missing (lost) #GP error code */
+
+		/*
+		 * Adjust our frame so that we return straight to the #GP
+		 * vector with the expected RSP value.  This is safe because
+		 * we won't enable interrupts or schedule before we invoke
+		 * general_protection, so nothing will clobber the stack
+		 * frame we just set up.
+		 */
 		regs->ip = (unsigned long)general_protection;
-		regs->sp = (unsigned long)&normal_regs->orig_ax;
+		regs->sp = (unsigned long)&gpregs->orig_ax;
 
 		return;
 	}
@@ -389,7 +409,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 	 *
 	 *   Processors update CR2 whenever a page fault is detected. If a
 	 *   second page fault occurs while an earlier page fault is being
-	 *   deliv- ered, the faulting linear address of the second fault will
+	 *   delivered, the faulting linear address of the second fault will
 	 *   overwrite the contents of CR2 (replacing the previous
 	 *   address). These updates to CR2 occur even if the page fault
 	 *   results in a double fault or occurs during the delivery of a
@@ -605,14 +625,15 @@ NOKPROBE_SYMBOL(do_int3);
 
 #ifdef CONFIG_X86_64
 /*
- * Help handler running on IST stack to switch off the IST stack if the
- * interrupted code was in user mode. The actual stack switch is done in
- * entry_64.S
+ * Help a handler running on a per-cpu (IST or entry trampoline) stack
+ * to switch to the normal thread stack if the interrupted code was in
+ * user mode. The actual stack switch is done in entry_64.S
  */
 asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
-	struct pt_regs *regs = task_pt_regs(current);
-	*regs = *eregs;
+	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
+	if (regs != eregs)
+		*regs = *eregs;
 	return regs;
 }
 NOKPROBE_SYMBOL(sync_regs);
@@ -628,13 +649,13 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
 	/*
 	 * This is called from entry_64.S early in handling a fault
 	 * caused by a bad iret to user mode.  To handle the fault
-	 * correctly, we want move our stack frame to task_pt_regs
-	 * and we want to pretend that the exception came from the
-	 * iret target.
+	 * correctly, we want to move our stack frame to where it would
+	 * be had we entered directly on the entry stack (rather than
+	 * just below the IRET frame) and we want to pretend that the
+	 * exception came from the IRET target.
 	 */
 	struct bad_iret_stack *new_stack =
-		container_of(task_pt_regs(current),
-			     struct bad_iret_stack, regs);
+		(struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
 	/* Copy the IRET target to the new stack. */
 	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
@@ -795,14 +816,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
 	debug_stack_usage_dec();
 
 exit:
-#if defined(CONFIG_X86_32)
-	/*
-	 * This is the most likely code path that involves non-trivial use
-	 * of the SYSENTER stack.  Check that we haven't overrun it.
-	 */
-	WARN(this_cpu_read(cpu_tss.SYSENTER_stack_canary) != STACK_END_MAGIC,
-	     "Overran or corrupted SYSENTER stack\n");
-#endif
 	ist_exit(regs);
 }
 NOKPROBE_SYMBOL(do_debug);
@@ -929,6 +942,9 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
 
 void __init trap_init(void)
 {
+	/* Init cpu_entry_area before IST entries are set up */
+	setup_cpu_entry_areas();
+
 	idt_setup_traps();
 
 	/*
@@ -936,8 +952,9 @@ void __init trap_init(void)
 	 * "sidt" instruction will not leak the location of the kernel, and
 	 * to defend the IDT against arbitrary memory write vulnerabilities.
 	 * It will be reloaded in cpu_init() */
-	__set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
-	idt_descr.address = fix_to_virt(FIX_RO_IDT);
+	cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table),
+		    PAGE_KERNEL_RO);
+	idt_descr.address = CPU_ENTRY_AREA_RO_IDT;
 
 	/*
 	 * Should be a barrier for any external CPU state:
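
Several of the fixups above (sync_regs(), fixup_bad_iret(), and the espfix64 double-fault path) share one idiom: cast the top of a stack to a struct pt_regs pointer and subtract one to find where a full register frame would sit. A userspace sketch of that pointer arithmetic, using a stand-in pt_regs layout and an ordinary array in place of the real entry stack:

#include <stdio.h>

/* Stand-in register frame; not the real x86 pt_regs layout. */
struct pt_regs {
	unsigned long bx, cx, dx, si, di, bp, ax;
	unsigned long orig_ax, ip, cs, flags, sp, ss;
};

int main(void)
{
	static unsigned char stack[8192];
	void *stack_top = stack + sizeof(stack);	/* plays the role of TSS.sp0 */
	struct pt_regs *regs = (struct pt_regs *)stack_top - 1;

	printf("stack top : %p\n", stack_top);
	printf("pt_regs at: %p (%zu bytes below the top)\n",
	       (void *)regs, sizeof(*regs));
	return 0;
}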
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index a3f973b2c97a03b121fe0173dbdc9298216721e6..be86a865087a6b9dc8e04031dbf2e2fbeeda1ed5 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -253,22 +253,15 @@ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
 	return NULL;
 }
 
-static bool stack_access_ok(struct unwind_state *state, unsigned long addr,
+static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
 			    size_t len)
 {
 	struct stack_info *info = &state->stack_info;
+	void *addr = (void *)_addr;
 
-	/*
-	 * If the address isn't on the current stack, switch to the next one.
-	 *
-	 * We may have to traverse multiple stacks to deal with the possibility
-	 * that info->next_sp could point to an empty stack and the address
-	 * could be on a subsequent stack.
-	 */
-	while (!on_stack(info, (void *)addr, len))
-		if (get_stack_info(info->next_sp, state->task, info,
-				   &state->stack_mask))
-			return false;
+	if (!on_stack(info, addr, len) &&
+	    (get_stack_info(addr, state->task, info, &state->stack_mask)))
+		return false;
 
 	return true;
 }
@@ -283,42 +276,32 @@ static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
 	return true;
 }
 
-#define REGS_SIZE (sizeof(struct pt_regs))
-#define SP_OFFSET (offsetof(struct pt_regs, sp))
-#define IRET_REGS_SIZE (REGS_SIZE - offsetof(struct pt_regs, ip))
-#define IRET_SP_OFFSET (SP_OFFSET - offsetof(struct pt_regs, ip))
-
 static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
-			     unsigned long *ip, unsigned long *sp, bool full)
+			     unsigned long *ip, unsigned long *sp)
 {
-	size_t regs_size = full ? REGS_SIZE : IRET_REGS_SIZE;
-	size_t sp_offset = full ? SP_OFFSET : IRET_SP_OFFSET;
-	struct pt_regs *regs = (struct pt_regs *)(addr + regs_size - REGS_SIZE);
-
-	if (IS_ENABLED(CONFIG_X86_64)) {
-		if (!stack_access_ok(state, addr, regs_size))
-			return false;
+	struct pt_regs *regs = (struct pt_regs *)addr;
 
-		*ip = regs->ip;
-		*sp = regs->sp;
+	/* x86-32 support will be more complicated due to the &regs->sp hack */
+	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));
 
-		return true;
-	}
-
-	if (!stack_access_ok(state, addr, sp_offset))
+	if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
 		return false;
 
 	*ip = regs->ip;
+	*sp = regs->sp;
+	return true;
+}
 
-	if (user_mode(regs)) {
-		if (!stack_access_ok(state, addr + sp_offset,
-				     REGS_SIZE - SP_OFFSET))
-			return false;
+static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
+				  unsigned long *ip, unsigned long *sp)
+{
+	struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;
 
-		*sp = regs->sp;
-	} else
-		*sp = (unsigned long)&regs->sp;
+	if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
+		return false;
 
+	*ip = regs->ip;
+	*sp = regs->sp;
 	return true;
 }
 
@@ -327,7 +310,6 @@ bool unwind_next_frame(struct unwind_state *state)
 	unsigned long ip_p, sp, orig_ip, prev_sp = state->sp;
 	enum stack_type prev_type = state->stack_info.type;
 	struct orc_entry *orc;
-	struct pt_regs *ptregs;
 	bool indirect = false;
 
 	if (unwind_done(state))
@@ -435,7 +417,7 @@ bool unwind_next_frame(struct unwind_state *state)
 		break;
 
 	case ORC_TYPE_REGS:
-		if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) {
+		if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
 			orc_warn("can't dereference registers at %p for ip %pB\n",
 				 (void *)sp, (void *)orig_ip);
 			goto done;
@@ -447,20 +429,14 @@ bool unwind_next_frame(struct unwind_state *state)
 		break;
 
 	case ORC_TYPE_REGS_IRET:
-		if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) {
+		if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
 			orc_warn("can't dereference iret registers at %p for ip %pB\n",
 				 (void *)sp, (void *)orig_ip);
 			goto done;
 		}
 
-		ptregs = container_of((void *)sp, struct pt_regs, ip);
-		if ((unsigned long)ptregs >= prev_sp &&
-		    on_stack(&state->stack_info, ptregs, REGS_SIZE)) {
-			state->regs = ptregs;
-			state->full_regs = false;
-		} else
-			state->regs = NULL;
-
+		state->regs = (void *)sp - IRET_FRAME_OFFSET;
+		state->full_regs = false;
 		state->signal = true;
 		break;
 
@@ -553,8 +529,18 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
 	}
 
 	if (get_stack_info((unsigned long *)state->sp, state->task,
-			   &state->stack_info, &state->stack_mask))
-		return;
+			   &state->stack_info, &state->stack_mask)) {
+		/*
+		 * We weren't on a valid stack.  It's possible that
+		 * we overflowed a valid stack into a guard page.
+		 * See if the next page up is valid so that we can
+		 * generate some kind of backtrace if this happens.
+		 */
+		void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
+		if (get_stack_info(next_page, state->task, &state->stack_info,
+				   &state->stack_mask))
+			return;
+	}
 
 	/*
 	 * The caller can provide the address of the first frame directly
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index a4009fb9be8725ce7bda96cd5e8160e524903266..1e413a9326aaa152a47d51fa4c1d1ea03cbb4d94 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -61,11 +61,17 @@ jiffies_64 = jiffies;
 		. = ALIGN(HPAGE_SIZE);				\
 		__end_rodata_hpage_align = .;
 
+#define ALIGN_ENTRY_TEXT_BEGIN	. = ALIGN(PMD_SIZE);
+#define ALIGN_ENTRY_TEXT_END	. = ALIGN(PMD_SIZE);
+
 #else
 
 #define X64_ALIGN_RODATA_BEGIN
 #define X64_ALIGN_RODATA_END
 
+#define ALIGN_ENTRY_TEXT_BEGIN
+#define ALIGN_ENTRY_TEXT_END
+
 #endif
 
 PHDRS {
@@ -102,11 +108,22 @@ SECTIONS
 		CPUIDLE_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
+		ALIGN_ENTRY_TEXT_BEGIN
 		ENTRY_TEXT
 		IRQENTRY_TEXT
+		ALIGN_ENTRY_TEXT_END
 		SOFTIRQENTRY_TEXT
 		*(.fixup)
 		*(.gnu.warning)
+
+#ifdef CONFIG_X86_64
+		. = ALIGN(PAGE_SIZE);
+		_entry_trampoline = .;
+		*(.entry_trampoline)
+		. = ALIGN(PAGE_SIZE);
+		ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
+#endif
+
 		/* End of text section */
 		_etext = .;
 	} :text = 0x9090
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index abe74f779f9d793e9a6c2f19417f23b5aa7ce484..b514b2b2845a334d4b53f28ed0b73c96f12d0e6a 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2390,9 +2390,21 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 }
 
 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
-				     u64 cr0, u64 cr4)
+				    u64 cr0, u64 cr3, u64 cr4)
 {
 	int bad;
+	u64 pcid;
+
+	/* In order to later set CR4.PCIDE, CR3[11:0] must be zero.  */
+	pcid = 0;
+	if (cr4 & X86_CR4_PCIDE) {
+		pcid = cr3 & 0xfff;
+		cr3 &= ~0xfff;
+	}
+
+	bad = ctxt->ops->set_cr(ctxt, 3, cr3);
+	if (bad)
+		return X86EMUL_UNHANDLEABLE;
 
 	/*
 	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
@@ -2411,6 +2423,12 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
 		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
 		if (bad)
 			return X86EMUL_UNHANDLEABLE;
+		if (pcid) {
+			bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
+			if (bad)
+				return X86EMUL_UNHANDLEABLE;
+		}
+
 	}
 
 	return X86EMUL_CONTINUE;
@@ -2421,11 +2439,11 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	struct desc_struct desc;
 	struct desc_ptr dt;
 	u16 selector;
-	u32 val, cr0, cr4;
+	u32 val, cr0, cr3, cr4;
 	int i;
 
 	cr0 =                      GET_SMSTATE(u32, smbase, 0x7ffc);
-	ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
+	cr3 =                      GET_SMSTATE(u32, smbase, 0x7ff8);
 	ctxt->eflags =             GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
 	ctxt->_eip =               GET_SMSTATE(u32, smbase, 0x7ff0);
 
@@ -2467,14 +2485,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
 
 	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
 
-	return rsm_enter_protected_mode(ctxt, cr0, cr4);
+	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 }
 
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 {
 	struct desc_struct desc;
 	struct desc_ptr dt;
-	u64 val, cr0, cr4;
+	u64 val, cr0, cr3, cr4;
 	u32 base3;
 	u16 selector;
 	int i, r;
@@ -2491,7 +2509,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
 
 	cr0 =                       GET_SMSTATE(u64, smbase, 0x7f58);
-	ctxt->ops->set_cr(ctxt, 3,  GET_SMSTATE(u64, smbase, 0x7f50));
+	cr3 =                       GET_SMSTATE(u64, smbase, 0x7f50);
 	cr4 =                       GET_SMSTATE(u64, smbase, 0x7f48);
 	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
 	val =                       GET_SMSTATE(u64, smbase, 0x7ed0);
@@ -2519,7 +2537,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
 	ctxt->ops->set_gdt(ctxt, &dt);
 
-	r = rsm_enter_protected_mode(ctxt, cr0, cr4);
+	r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
 	if (r != X86EMUL_CONTINUE)
 		return r;
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e5e66e5c664057bb5cc5ad2660008ccbf19b69e5..c4deb1f34faa6ce7ffe6bcaaebddc3e87b2a9a69 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3395,7 +3395,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 		spin_lock(&vcpu->kvm->mmu_lock);
 		if(make_mmu_pages_available(vcpu) < 0) {
 			spin_unlock(&vcpu->kvm->mmu_lock);
-			return 1;
+			return -ENOSPC;
 		}
 		sp = kvm_mmu_get_page(vcpu, 0, 0,
 				vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL);
@@ -3410,7 +3410,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 			spin_lock(&vcpu->kvm->mmu_lock);
 			if (make_mmu_pages_available(vcpu) < 0) {
 				spin_unlock(&vcpu->kvm->mmu_lock);
-				return 1;
+				return -ENOSPC;
 			}
 			sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
 					i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
@@ -3450,7 +3450,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		spin_lock(&vcpu->kvm->mmu_lock);
 		if (make_mmu_pages_available(vcpu) < 0) {
 			spin_unlock(&vcpu->kvm->mmu_lock);
-			return 1;
+			return -ENOSPC;
 		}
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
 				vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL);
@@ -3487,7 +3487,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		spin_lock(&vcpu->kvm->mmu_lock);
 		if (make_mmu_pages_available(vcpu) < 0) {
 			spin_unlock(&vcpu->kvm->mmu_lock);
-			return 1;
+			return -ENOSPC;
 		}
 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
 				      0, ACC_ALL);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8eba631c4dbd509d8687c6135e8dba267042f5e0..023afa0c8887002d6a79a8b121b46996feec1a61 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2302,7 +2302,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 * processors.  See 22.2.4.
 		 */
 		vmcs_writel(HOST_TR_BASE,
-			    (unsigned long)this_cpu_ptr(&cpu_tss));
+			    (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
 		vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
 
 		/*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index faf843c9b916ead0992d0b155a138c6afdf7ae57..1cec2c62a0b08405d2bd7c8908d6b7f33de3b63c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4384,7 +4384,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
 					 addr, n, v))
 		    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
 			break;
-		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
+		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
 		handled += n;
 		addr += n;
 		len -= n;
@@ -4643,7 +4643,7 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
 {
 	if (vcpu->mmio_read_completed) {
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
-			       vcpu->mmio_fragments[0].gpa, *(u64 *)val);
+			       vcpu->mmio_fragments[0].gpa, val);
 		vcpu->mmio_read_completed = 0;
 		return 1;
 	}
@@ -4665,14 +4665,14 @@ static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
 {
-	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
+	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
 	return vcpu_mmio_write(vcpu, gpa, bytes, val);
 }
 
 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
 			  void *val, int bytes)
 {
-	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
+	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
 	return X86EMUL_IO_NEEDED;
 }
 
@@ -7264,13 +7264,12 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	struct fpu *fpu = &current->thread.fpu;
 	int r;
 
-	fpu__initialize(fpu);
-
 	kvm_sigset_activate(vcpu);
 
+	kvm_load_guest_fpu(vcpu);
+
 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		if (kvm_run->immediate_exit) {
 			r = -EINTR;
@@ -7296,14 +7295,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		}
 	}
 
-	kvm_load_guest_fpu(vcpu);
-
 	if (unlikely(vcpu->arch.complete_userspace_io)) {
 		int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
 		vcpu->arch.complete_userspace_io = NULL;
 		r = cui(vcpu);
 		if (r <= 0)
-			goto out_fpu;
+			goto out;
 	} else
 		WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
 
@@ -7312,9 +7309,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	else
 		r = vcpu_run(vcpu);
 
-out_fpu:
-	kvm_put_guest_fpu(vcpu);
 out:
+	kvm_put_guest_fpu(vcpu);
 	post_kvm_run_save(vcpu);
 	kvm_sigset_deactivate(vcpu);
 
@@ -7384,7 +7380,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 #endif
 
 	kvm_rip_write(vcpu, regs->rip);
-	kvm_set_rflags(vcpu, regs->rflags);
+	kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
 
 	vcpu->arch.exception.pending = false;
 
@@ -7498,6 +7494,29 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
 }
 EXPORT_SYMBOL_GPL(kvm_task_switch);
 
+int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+	if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG_BIT)) {
+		/*
+		 * When EFER.LME and CR0.PG are set, the processor is in
+		 * 64-bit mode (though maybe in a 32-bit code segment).
+		 * CR4.PAE and EFER.LMA must be set.
+		 */
+		if (!(sregs->cr4 & X86_CR4_PAE_BIT)
+		    || !(sregs->efer & EFER_LMA))
+			return -EINVAL;
+	} else {
+		/*
+		 * Not in 64-bit mode: EFER.LMA is clear and the code
+		 * segment cannot be 64-bit.
+		 */
+		if (sregs->efer & EFER_LMA || sregs->cs.l)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 				  struct kvm_sregs *sregs)
 {
@@ -7510,6 +7529,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 			(sregs->cr4 & X86_CR4_OSXSAVE))
 		return -EINVAL;
 
+	if (kvm_valid_sregs(vcpu, sregs))
+		return -EINVAL;
+
 	apic_base_msr.data = sregs->apic_base;
 	apic_base_msr.host_initiated = true;
 	if (kvm_set_apic_base(vcpu, &apic_base_msr))
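
kvm_valid_sregs() above boils down to one consistency rule: a guest claiming long mode (EFER.LME and CR0.PG set) must also have CR4.PAE and EFER.LMA set, while a guest not in long mode must have EFER.LMA clear and no 64-bit code segment. A sketch of that rule reduced to booleans (an illustration, not the KVM function itself):

#include <stdbool.h>
#include <stdio.h>

static bool sregs_valid(bool lme, bool pg, bool pae, bool lma, bool cs_l)
{
	if (lme && pg)			/* claims long mode */
		return pae && lma;
	return !lma && !cs_l;		/* not in long mode */
}

int main(void)
{
	printf("long mode, PAE+LMA set: %s\n",
	       sregs_valid(true, true, true, true, true) ? "valid" : "invalid");
	printf("long mode, LMA clear  : %s\n",
	       sregs_valid(true, true, true, false, true) ? "valid" : "invalid");
	return 0;
}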
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 553f8fd23cc4733d0edafa862b95446f7a04bab1..4846eff7e4c8b1505501d7f1dcb64127d0a4c67c 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -107,10 +107,10 @@ static void delay_mwaitx(unsigned long __loops)
 		delay = min_t(u64, MWAITX_MAX_LOOPS, loops);
 
 		/*
-		 * Use cpu_tss as a cacheline-aligned, seldomly
+		 * Use cpu_tss_rw as a cacheline-aligned, seldom
 		 * accessed per-cpu variable as the monitor target.
 		 */
-		__monitorx(raw_cpu_ptr(&cpu_tss), 0, 0);
+		__monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
 
 		/*
 		 * AMD, like Intel, supports the EAX hint and EAX=0xf
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 8e13b8cc6bedb0dc84eea64cd80ca6ae39037eaa..27e9e90a8d3572b900ccaacae1623de803072b17 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -10,7 +10,7 @@ CFLAGS_REMOVE_mem_encrypt.o	= -pg
 endif
 
 obj-y	:=  init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-	    pat.o pgtable.o physaddr.o setup_nx.o tlb.o
+	    pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o
 
 # Make sure __phys_addr has no stackprotector
 nostackp := $(call cc-option, -fno-stack-protector)
@@ -41,9 +41,10 @@ obj-$(CONFIG_AMD_NUMA)		+= amdtopology.o
 obj-$(CONFIG_ACPI_NUMA)		+= srat.o
 obj-$(CONFIG_NUMA_EMU)		+= numa_emulation.o
 
-obj-$(CONFIG_X86_INTEL_MPX)	+= mpx.o
-obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o
-obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o
+obj-$(CONFIG_X86_INTEL_MPX)			+= mpx.o
+obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)	+= pkeys.o
+obj-$(CONFIG_RANDOMIZE_MEMORY)			+= kaslr.o
+obj-$(CONFIG_PAGE_TABLE_ISOLATION)		+= pti.o
 
 obj-$(CONFIG_AMD_MEM_ENCRYPT)	+= mem_encrypt.o
 obj-$(CONFIG_AMD_MEM_ENCRYPT)	+= mem_encrypt_boot.o
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
new file mode 100644
index 0000000000000000000000000000000000000000..b9283cc276220db667ab091a3358eb5741813f7f
--- /dev/null
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+
+#include <asm/cpu_entry_area.h>
+#include <asm/pgtable.h>
+#include <asm/fixmap.h>
+#include <asm/desc.h>
+
+static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
+
+#ifdef CONFIG_X86_64
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
+#endif
+
+struct cpu_entry_area *get_cpu_entry_area(int cpu)
+{
+	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
+	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
+
+	return (struct cpu_entry_area *) va;
+}
+EXPORT_SYMBOL(get_cpu_entry_area);
+
+void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
+{
+	unsigned long va = (unsigned long) cea_vaddr;
+
+	set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags));
+}
+
+static void __init
+cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
+{
+	for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE)
+		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
+}
+
+static void percpu_setup_debug_store(int cpu)
+{
+#ifdef CONFIG_CPU_SUP_INTEL
+	int npages;
+	void *cea;
+
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+		return;
+
+	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
+	npages = sizeof(struct debug_store) / PAGE_SIZE;
+	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
+	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
+			     PAGE_KERNEL);
+
+	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
+	/*
+	 * Force the population of PMDs for not yet allocated per cpu
+	 * memory like debug store buffers.
+	 */
+	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
+	for (; npages; npages--, cea += PAGE_SIZE)
+		cea_set_pte(cea, 0, PAGE_NONE);
+#endif
+}
+
+/* Setup the fixmap mappings only once per-processor */
+static void __init setup_cpu_entry_area(int cpu)
+{
+#ifdef CONFIG_X86_64
+	extern char _entry_trampoline[];
+
+	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
+	pgprot_t gdt_prot = PAGE_KERNEL_RO;
+	pgprot_t tss_prot = PAGE_KERNEL_RO;
+#else
+	/*
+	 * On native 32-bit systems, the GDT cannot be read-only because
+	 * our double fault handler uses a task gate, and entering through
+	 * a task gate needs to change an available TSS to busy.  If the
+	 * GDT is read-only, that will triple fault.  The TSS cannot be
+	 * read-only because the CPU writes to it on task switches.
+	 *
+	 * On Xen PV, the GDT must be read-only because the hypervisor
+	 * requires it.
+	 */
+	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
+		PAGE_KERNEL_RO : PAGE_KERNEL;
+	pgprot_t tss_prot = PAGE_KERNEL;
+#endif
+
+	cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
+		    gdt_prot);
+
+	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
+			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
+			     PAGE_KERNEL);
+
+	/*
+	 * The Intel SDM says (Volume 3, 7.2.1):
+	 *
+	 *  Avoid placing a page boundary in the part of the TSS that the
+	 *  processor reads during a task switch (the first 104 bytes). The
+	 *  processor may not correctly perform address translations if a
+	 *  boundary occurs in this area. During a task switch, the processor
+	 *  reads and writes into the first 104 bytes of each TSS (using
+	 *  contiguous physical addresses beginning with the physical address
+	 *  of the first byte of the TSS). So, after TSS access begins, if
+	 *  part of the 104 bytes is not physically contiguous, the processor
+	 *  will access incorrect information without generating a page-fault
+	 *  exception.
+	 *
+	 * There are also a lot of errata involving the TSS spanning a page
+	 * boundary.  Assert that we're not doing that.
+	 */
+	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
+		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
+	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
+	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
+			     &per_cpu(cpu_tss_rw, cpu),
+			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);
+
+#ifdef CONFIG_X86_32
+	per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
+#endif
+
+#ifdef CONFIG_X86_64
+	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
+	BUILD_BUG_ON(sizeof(exception_stacks) !=
+		     sizeof(((struct cpu_entry_area *)0)->exception_stacks));
+	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
+			     &per_cpu(exception_stacks, cpu),
+			     sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
+
+	cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
+		     __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
+#endif
+	percpu_setup_debug_store(cpu);
+}
+
+static __init void setup_cpu_entry_area_ptes(void)
+{
+#ifdef CONFIG_X86_32
+	unsigned long start, end;
+
+	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
+	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
+
+	start = CPU_ENTRY_AREA_BASE;
+	end = start + CPU_ENTRY_AREA_MAP_SIZE;
+
+	/* Careful here: start + PMD_SIZE might wrap around */
+	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
+		populate_extra_pte(start);
+#endif
+}
+
+void __init setup_cpu_entry_areas(void)
+{
+	unsigned int cpu;
+
+	setup_cpu_entry_area_ptes();
+
+	for_each_possible_cpu(cpu)
+		setup_cpu_entry_area(cpu);
+}
diff --git a/arch/x86/mm/debug_pagetables.c b/arch/x86/mm/debug_pagetables.c
index bfcffdf6c5775f7ac5bd4c9f768573d7ae7bba55..421f2664ffa06e6cd4b31e5d6521648add3e2c4e 100644
--- a/arch/x86/mm/debug_pagetables.c
+++ b/arch/x86/mm/debug_pagetables.c
@@ -5,7 +5,7 @@
 
 static int ptdump_show(struct seq_file *m, void *v)
 {
-	ptdump_walk_pgd_level(m, NULL);
+	ptdump_walk_pgd_level_debugfs(m, NULL, false);
 	return 0;
 }
 
@@ -22,21 +22,89 @@ static const struct file_operations ptdump_fops = {
 	.release	= single_release,
 };
 
-static struct dentry *pe;
+static int ptdump_show_curknl(struct seq_file *m, void *v)
+{
+	if (current->mm->pgd) {
+		down_read(&current->mm->mmap_sem);
+		ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, false);
+		up_read(&current->mm->mmap_sem);
+	}
+	return 0;
+}
+
+static int ptdump_open_curknl(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, ptdump_show_curknl, NULL);
+}
+
+static const struct file_operations ptdump_curknl_fops = {
+	.owner		= THIS_MODULE,
+	.open		= ptdump_open_curknl,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+static struct dentry *pe_curusr;
+
+static int ptdump_show_curusr(struct seq_file *m, void *v)
+{
+	if (current->mm->pgd) {
+		down_read(&current->mm->mmap_sem);
+		ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, true);
+		up_read(&current->mm->mmap_sem);
+	}
+	return 0;
+}
+
+static int ptdump_open_curusr(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, ptdump_show_curusr, NULL);
+}
+
+static const struct file_operations ptdump_curusr_fops = {
+	.owner		= THIS_MODULE,
+	.open		= ptdump_open_curusr,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+#endif
+
+static struct dentry *dir, *pe_knl, *pe_curknl;
 
 static int __init pt_dump_debug_init(void)
 {
-	pe = debugfs_create_file("kernel_page_tables", S_IRUSR, NULL, NULL,
-				 &ptdump_fops);
-	if (!pe)
+	dir = debugfs_create_dir("page_tables", NULL);
+	if (!dir)
 		return -ENOMEM;
 
+	pe_knl = debugfs_create_file("kernel", 0400, dir, NULL,
+				     &ptdump_fops);
+	if (!pe_knl)
+		goto err;
+
+	pe_curknl = debugfs_create_file("current_kernel", 0400,
+					dir, NULL, &ptdump_curknl_fops);
+	if (!pe_curknl)
+		goto err;
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	pe_curusr = debugfs_create_file("current_user", 0400,
+					dir, NULL, &ptdump_curusr_fops);
+	if (!pe_curusr)
+		goto err;
+#endif
 	return 0;
+err:
+	debugfs_remove_recursive(dir);
+	return -ENOMEM;
 }
 
 static void __exit pt_dump_debug_exit(void)
 {
-	debugfs_remove_recursive(pe);
+	debugfs_remove_recursive(dir);
 }
 
 module_init(pt_dump_debug_init);
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 5e3ac6fe6c9e32ed1906f4f9bf736310a7193c7d..f56902c1f04b94e1543710dee75d2d7dee19b7c9 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -44,68 +44,97 @@ struct addr_marker {
 	unsigned long max_lines;
 };
 
-/* indices for address_markers; keep sync'd w/ address_markers below */
+/* Address space marker hints */
+
+#ifdef CONFIG_X86_64
+
 enum address_markers_idx {
 	USER_SPACE_NR = 0,
-#ifdef CONFIG_X86_64
 	KERNEL_SPACE_NR,
 	LOW_KERNEL_NR,
+#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL)
+	LDT_NR,
+#endif
 	VMALLOC_START_NR,
 	VMEMMAP_START_NR,
 #ifdef CONFIG_KASAN
 	KASAN_SHADOW_START_NR,
 	KASAN_SHADOW_END_NR,
 #endif
-# ifdef CONFIG_X86_ESPFIX64
+#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
+	LDT_NR,
+#endif
+	CPU_ENTRY_AREA_NR,
+#ifdef CONFIG_X86_ESPFIX64
 	ESPFIX_START_NR,
-# endif
+#endif
+#ifdef CONFIG_EFI
+	EFI_END_NR,
+#endif
 	HIGH_KERNEL_NR,
 	MODULES_VADDR_NR,
 	MODULES_END_NR,
-#else
+	FIXADDR_START_NR,
+	END_OF_SPACE_NR,
+};
+
+static struct addr_marker address_markers[] = {
+	[USER_SPACE_NR]		= { 0,			"User Space" },
+	[KERNEL_SPACE_NR]	= { (1UL << 63),	"Kernel Space" },
+	[LOW_KERNEL_NR]		= { 0UL,		"Low Kernel Mapping" },
+	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
+	[VMEMMAP_START_NR]	= { 0UL,		"Vmemmap" },
+#ifdef CONFIG_KASAN
+	[KASAN_SHADOW_START_NR]	= { KASAN_SHADOW_START,	"KASAN shadow" },
+	[KASAN_SHADOW_END_NR]	= { KASAN_SHADOW_END,	"KASAN shadow end" },
+#endif
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+	[LDT_NR]		= { LDT_BASE_ADDR,	"LDT remap" },
+#endif
+	[CPU_ENTRY_AREA_NR]	= { CPU_ENTRY_AREA_BASE,"CPU entry Area" },
+#ifdef CONFIG_X86_ESPFIX64
+	[ESPFIX_START_NR]	= { ESPFIX_BASE_ADDR,	"ESPfix Area", 16 },
+#endif
+#ifdef CONFIG_EFI
+	[EFI_END_NR]		= { EFI_VA_END,		"EFI Runtime Services" },
+#endif
+	[HIGH_KERNEL_NR]	= { __START_KERNEL_map,	"High Kernel Mapping" },
+	[MODULES_VADDR_NR]	= { MODULES_VADDR,	"Modules" },
+	[MODULES_END_NR]	= { MODULES_END,	"End Modules" },
+	[FIXADDR_START_NR]	= { FIXADDR_START,	"Fixmap Area" },
+	[END_OF_SPACE_NR]	= { -1,			NULL }
+};
+
+#else /* CONFIG_X86_64 */
+
+enum address_markers_idx {
+	USER_SPACE_NR = 0,
 	KERNEL_SPACE_NR,
 	VMALLOC_START_NR,
 	VMALLOC_END_NR,
-# ifdef CONFIG_HIGHMEM
+#ifdef CONFIG_HIGHMEM
 	PKMAP_BASE_NR,
-# endif
-	FIXADDR_START_NR,
 #endif
+	CPU_ENTRY_AREA_NR,
+	FIXADDR_START_NR,
+	END_OF_SPACE_NR,
 };
 
-/* Address space markers hints */
 static struct addr_marker address_markers[] = {
-	{ 0, "User Space" },
-#ifdef CONFIG_X86_64
-	{ 0x8000000000000000UL, "Kernel Space" },
-	{ 0/* PAGE_OFFSET */,   "Low Kernel Mapping" },
-	{ 0/* VMALLOC_START */, "vmalloc() Area" },
-	{ 0/* VMEMMAP_START */, "Vmemmap" },
-#ifdef CONFIG_KASAN
-	{ KASAN_SHADOW_START,	"KASAN shadow" },
-	{ KASAN_SHADOW_END,	"KASAN shadow end" },
+	[USER_SPACE_NR]		= { 0,			"User Space" },
+	[KERNEL_SPACE_NR]	= { PAGE_OFFSET,	"Kernel Mapping" },
+	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
+	[VMALLOC_END_NR]	= { 0UL,		"vmalloc() End" },
+#ifdef CONFIG_HIGHMEM
+	[PKMAP_BASE_NR]		= { 0UL,		"Persistent kmap() Area" },
 #endif
-# ifdef CONFIG_X86_ESPFIX64
-	{ ESPFIX_BASE_ADDR,	"ESPfix Area", 16 },
-# endif
-# ifdef CONFIG_EFI
-	{ EFI_VA_END,		"EFI Runtime Services" },
-# endif
-	{ __START_KERNEL_map,   "High Kernel Mapping" },
-	{ MODULES_VADDR,        "Modules" },
-	{ MODULES_END,          "End Modules" },
-#else
-	{ PAGE_OFFSET,          "Kernel Mapping" },
-	{ 0/* VMALLOC_START */, "vmalloc() Area" },
-	{ 0/*VMALLOC_END*/,     "vmalloc() End" },
-# ifdef CONFIG_HIGHMEM
-	{ 0/*PKMAP_BASE*/,      "Persistent kmap() Area" },
-# endif
-	{ 0/*FIXADDR_START*/,   "Fixmap Area" },
-#endif
-	{ -1, NULL }		/* End of list */
+	[CPU_ENTRY_AREA_NR]	= { 0UL,		"CPU entry area" },
+	[FIXADDR_START_NR]	= { 0UL,		"Fixmap area" },
+	[END_OF_SPACE_NR]	= { -1,			NULL }
 };
 
+#endif /* !CONFIG_X86_64 */
+
 /* Multipliers for offsets within the PTEs */
 #define PTE_LEVEL_MULT (PAGE_SIZE)
 #define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
@@ -140,7 +169,7 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
 	static const char * const level_name[] =
 		{ "cr3", "pgd", "p4d", "pud", "pmd", "pte" };
 
-	if (!pgprot_val(prot)) {
+	if (!(pr & _PAGE_PRESENT)) {
 		/* Not present */
 		pt_dump_cont_printf(m, dmsg, "                              ");
 	} else {
@@ -447,7 +476,7 @@ static inline bool is_hypervisor_range(int idx)
 }
 
 static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
-				       bool checkwx)
+				       bool checkwx, bool dmesg)
 {
 #ifdef CONFIG_X86_64
 	pgd_t *start = (pgd_t *) &init_top_pgt;
@@ -460,7 +489,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
 
 	if (pgd) {
 		start = pgd;
-		st.to_dmesg = true;
+		st.to_dmesg = dmesg;
 	}
 
 	st.check_wx = checkwx;
@@ -498,13 +527,37 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
 
 void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
 {
-	ptdump_walk_pgd_level_core(m, pgd, false);
+	ptdump_walk_pgd_level_core(m, pgd, false, true);
+}
+
+void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
+{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	if (user && static_cpu_has(X86_FEATURE_PTI))
+		pgd = kernel_to_user_pgdp(pgd);
+#endif
+	ptdump_walk_pgd_level_core(m, pgd, false, false);
+}
+EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);
+
+static void ptdump_walk_user_pgd_level_checkwx(void)
+{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	pgd_t *pgd = (pgd_t *) &init_top_pgt;
+
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return;
+
+	pr_info("x86/mm: Checking user space page tables\n");
+	pgd = kernel_to_user_pgdp(pgd);
+	ptdump_walk_pgd_level_core(NULL, pgd, true, false);
+#endif
 }
-EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level);
 
 void ptdump_walk_pgd_level_checkwx(void)
 {
-	ptdump_walk_pgd_level_core(NULL, NULL, true);
+	ptdump_walk_pgd_level_core(NULL, NULL, true, false);
+	ptdump_walk_user_pgd_level_checkwx();
 }
 
 static int __init pt_dump_init(void)
@@ -525,8 +578,8 @@ static int __init pt_dump_init(void)
 	address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
 # endif
 	address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
+	address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
 #endif
-
 	return 0;
 }
 __initcall(pt_dump_init);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index febf6980e6535572f998cf2fa0ee63d296bdc6f1..06fe3d51d385b88111961c0b5addc673fcd597a2 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -860,7 +860,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
 	if (!printk_ratelimit())
 		return;
 
-	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
+	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
 		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
 		tsk->comm, task_pid_nr(tsk), address,
 		(void *)regs->ip, (void *)regs->sp, error_code);
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 6fdf91ef130a4737ab434ce98f77b2583fe1840d..8ca324d072828e19700ba094dbeae433b767c964 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -20,6 +20,7 @@
 #include <asm/kaslr.h>
 #include <asm/hypervisor.h>
 #include <asm/cpufeature.h>
+#include <asm/pti.h>
 
 /*
  * We need to define the tracepoints somewhere, and tlb.c
@@ -160,6 +161,12 @@ struct map_range {
 
 static int page_size_mask;
 
+static void enable_global_pages(void)
+{
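+	/*
+	 * Global TLB entries survive CR3 switches and would leak kernel
+	 * translations into user space when PTI is in use, so only allow
+	 * _PAGE_GLOBAL when PTI is disabled.
+	 */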
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		__supported_pte_mask |= _PAGE_GLOBAL;
+}
+
 static void __init probe_page_size_mask(void)
 {
 	/*
@@ -177,11 +184,11 @@ static void __init probe_page_size_mask(void)
 		cr4_set_bits_and_update_boot(X86_CR4_PSE);
 
 	/* Enable PGE if available */
+	__supported_pte_mask &= ~_PAGE_GLOBAL;
 	if (boot_cpu_has(X86_FEATURE_PGE)) {
 		cr4_set_bits_and_update_boot(X86_CR4_PGE);
-		__supported_pte_mask |= _PAGE_GLOBAL;
-	} else
-		__supported_pte_mask &= ~_PAGE_GLOBAL;
+		enable_global_pages();
+	}
 
 	/* Enable 1 GB linear kernel mappings if available: */
 	if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
@@ -194,34 +201,44 @@ static void __init probe_page_size_mask(void)
 
 static void setup_pcid(void)
 {
-#ifdef CONFIG_X86_64
-	if (boot_cpu_has(X86_FEATURE_PCID)) {
-		if (boot_cpu_has(X86_FEATURE_PGE)) {
-			/*
-			 * This can't be cr4_set_bits_and_update_boot() --
-			 * the trampoline code can't handle CR4.PCIDE and
-			 * it wouldn't do any good anyway.  Despite the name,
-			 * cr4_set_bits_and_update_boot() doesn't actually
-			 * cause the bits in question to remain set all the
-			 * way through the secondary boot asm.
-			 *
-			 * Instead, we brute-force it and set CR4.PCIDE
-			 * manually in start_secondary().
-			 */
-			cr4_set_bits(X86_CR4_PCIDE);
-		} else {
-			/*
-			 * flush_tlb_all(), as currently implemented, won't
-			 * work if PCID is on but PGE is not.  Since that
-			 * combination doesn't exist on real hardware, there's
-			 * no reason to try to fully support it, but it's
-			 * polite to avoid corrupting data if we're on
-			 * an improperly configured VM.
-			 */
-			setup_clear_cpu_cap(X86_FEATURE_PCID);
-		}
+	if (!IS_ENABLED(CONFIG_X86_64))
+		return;
+
+	if (!boot_cpu_has(X86_FEATURE_PCID))
+		return;
+
+	if (boot_cpu_has(X86_FEATURE_PGE)) {
+		/*
+		 * This can't be cr4_set_bits_and_update_boot() -- the
+		 * trampoline code can't handle CR4.PCIDE and it wouldn't
+		 * do any good anyway.  Despite the name,
+		 * cr4_set_bits_and_update_boot() doesn't actually cause
+		 * the bits in question to remain set all the way through
+		 * the secondary boot asm.
+		 *
+		 * Instead, we brute-force it and set CR4.PCIDE manually in
+		 * start_secondary().
+		 */
+		cr4_set_bits(X86_CR4_PCIDE);
+
+		/*
+		 * INVPCID's single-context modes (2/3) only work if we set
+		 * X86_CR4_PCIDE, *and* we have INVPCID support.  It's unusable
+		 * on systems that have X86_CR4_PCIDE clear, or that have
+		 * no INVPCID support at all.
+		 */
+		if (boot_cpu_has(X86_FEATURE_INVPCID))
+			setup_force_cpu_cap(X86_FEATURE_INVPCID_SINGLE);
+	} else {
+		/*
+		 * flush_tlb_all(), as currently implemented, won't work if
+		 * PCID is on but PGE is not.  Since that combination
+		 * doesn't exist on real hardware, there's no reason to try
+		 * to fully support it, but it's polite to avoid corrupting
+		 * data if we're on an improperly configured VM.
+		 */
+		setup_clear_cpu_cap(X86_FEATURE_PCID);
 	}
-#endif
 }
 
 #ifdef CONFIG_X86_32
@@ -622,6 +639,7 @@ void __init init_mem_mapping(void)
 {
 	unsigned long end;
 
+	pti_check_boottime_disable();
 	probe_page_size_mask();
 	setup_pcid();
 
@@ -845,7 +863,7 @@ void __init zone_sizes_init(void)
 	free_area_init_nodes(max_zone_pfns);
 }
 
-DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
 	.loaded_mm = &init_mm,
 	.next_asid = 1,
 	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 8a64a6f2848d9be2e73a341f4d87ab2dc35de09f..135c9a7898c7da908f1340f9750774b4327e63b3 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -50,6 +50,7 @@
 #include <asm/setup.h>
 #include <asm/set_memory.h>
 #include <asm/page_types.h>
+#include <asm/cpu_entry_area.h>
 #include <asm/init.h>
 
 #include "mm_internal.h"
@@ -766,6 +767,7 @@ void __init mem_init(void)
 	mem_init_print_info(NULL);
 	printk(KERN_INFO "virtual kernel memory layout:\n"
 		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+		"  cpu_entry : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #ifdef CONFIG_HIGHMEM
 		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
 #endif
@@ -777,6 +779,10 @@ void __init mem_init(void)
 		FIXADDR_START, FIXADDR_TOP,
 		(FIXADDR_TOP - FIXADDR_START) >> 10,
 
+		CPU_ENTRY_AREA_BASE,
+		CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE,
+		CPU_ENTRY_AREA_MAP_SIZE >> 10,
+
 #ifdef CONFIG_HIGHMEM
 		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
 		(LAST_PKMAP*PAGE_SIZE) >> 10,
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 99dfed6dfef8b2f9028f82b89ab8dc2bde8173c4..47388f0c0e59649ca3574d4e7c31b356dad7d247 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -15,6 +15,7 @@
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
+#include <asm/cpu_entry_area.h>
 
 extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
@@ -277,6 +278,7 @@ void __init kasan_early_init(void)
 void __init kasan_init(void)
 {
 	int i;
+	void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;
 
 #ifdef CONFIG_KASAN_INLINE
 	register_die_notifier(&kasan_die_notifier);
@@ -321,16 +323,33 @@ void __init kasan_init(void)
 		map_range(&pfn_mapped[i]);
 	}
 
+	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
+	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
+	shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
+						PAGE_SIZE);
+
+	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
+					CPU_ENTRY_AREA_MAP_SIZE);
+	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
+	shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
+					PAGE_SIZE);
+
 	kasan_populate_zero_shadow(
 		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
-		kasan_mem_to_shadow((void *)__START_KERNEL_map));
+		shadow_cpu_entry_begin);
+
+	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
+			      (unsigned long)shadow_cpu_entry_end, 0);
+
+	kasan_populate_zero_shadow(shadow_cpu_entry_end,
+				kasan_mem_to_shadow((void *)__START_KERNEL_map));
 
 	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
 			      (unsigned long)kasan_mem_to_shadow(_end),
 			      early_pfn_to_nid(__pa(_stext)));
 
 	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
-			(void *)KASAN_SHADOW_END);
+				(void *)KASAN_SHADOW_END);
 
 	load_cr3(init_top_pgt);
 	__flush_tlb_all();
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index d9a9e9fc75dd7b511050adc6bd4c914b0c4fcdaa..391b13402e403041222ef8e6bd22f60edaeb9446 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -405,13 +405,13 @@ bool sme_active(void)
 {
 	return sme_me_mask && !sev_enabled;
 }
-EXPORT_SYMBOL_GPL(sme_active);
+EXPORT_SYMBOL(sme_active);
 
 bool sev_active(void)
 {
 	return sme_me_mask && sev_enabled;
 }
-EXPORT_SYMBOL_GPL(sev_active);
+EXPORT_SYMBOL(sev_active);
 
 static const struct dma_map_ops sev_dma_ops = {
 	.alloc                  = sev_alloc,
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 96d456a94b0342eb967f918e4233498ac24e4349..004abf9ebf1222c169448090f7f1c570635bce41 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -355,14 +355,15 @@ static inline void _pgd_free(pgd_t *pgd)
 		kmem_cache_free(pgd_cache, pgd);
 }
 #else
+
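+/*
+ * With page table isolation enabled, PGD_ALLOCATION_ORDER makes the
+ * allocation large enough to hold the user-space copy of the PGD as well.
+ */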
 static inline pgd_t *_pgd_alloc(void)
 {
-	return (pgd_t *)__get_free_page(PGALLOC_GFP);
+	return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
 }
 
 static inline void _pgd_free(pgd_t *pgd)
 {
-	free_page((unsigned long)pgd);
+	free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
 }
 #endif /* CONFIG_X86_PAE */
 
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 6b9bf023a700559b87ae7ac89570d9bbd26d1f05..c3c5274410a908e762aed936406006d63c3116ac 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -10,6 +10,7 @@
 #include <linux/pagemap.h>
 #include <linux/spinlock.h>
 
+#include <asm/cpu_entry_area.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/fixmap.h>
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
new file mode 100644
index 0000000000000000000000000000000000000000..bce8aea656062197ac0649e7a204d0c5cd0a5e29
--- /dev/null
+++ b/arch/x86/mm/pti.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * This code is based in part on work published here:
+ *
+ *	https://github.com/IAIK/KAISER
+ *
+ * The original work was written by, and signed off for the Linux
+ * kernel by:
+ *
+ *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
+ *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
+ *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
+ *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
+ *
+ * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
+ * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
+ *		       Andy Lutomirsky <luto@amacapital.net>
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+
+#include <asm/cpufeature.h>
+#include <asm/hypervisor.h>
+#include <asm/vsyscall.h>
+#include <asm/cmdline.h>
+#include <asm/pti.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/desc.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt)     "Kernel/User page tables isolation: " fmt
+
+/* Backporting helper */
+#ifndef __GFP_NOTRACK
+#define __GFP_NOTRACK	0
+#endif
+
+static void __init pti_print_if_insecure(const char *reason)
+{
+	if (boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
+		pr_info("%s\n", reason);
+}
+
+static void __init pti_print_if_secure(const char *reason)
+{
+	if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
+		pr_info("%s\n", reason);
+}
+
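+/*
+ * Evaluate the "pti=" and "nopti" command line options and force-enable
+ * PTI on CPUs marked as insecure unless it was explicitly disabled.
+ */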
+void __init pti_check_boottime_disable(void)
+{
+	char arg[5];
+	int ret;
+
+	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
+		pti_print_if_insecure("disabled on XEN PV.");
+		return;
+	}
+
+	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
+	if (ret > 0)  {
+		if (ret == 3 && !strncmp(arg, "off", 3)) {
+			pti_print_if_insecure("disabled on command line.");
+			return;
+		}
+		if (ret == 2 && !strncmp(arg, "on", 2)) {
+			pti_print_if_secure("force enabled on command line.");
+			goto enable;
+		}
+		if (ret == 4 && !strncmp(arg, "auto", 4))
+			goto autosel;
+	}
+
+	if (cmdline_find_option_bool(boot_command_line, "nopti")) {
+		pti_print_if_insecure("disabled on command line.");
+		return;
+	}
+
+autosel:
+	if (!boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
+		return;
+enable:
+	setup_force_cpu_cap(X86_FEATURE_PTI);
+}
+
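+/*
+ * Propagate a PGD entry that maps userspace into the user page tables and
+ * return the value the kernel page tables should use, with NX set for
+ * normal user memory.
+ */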
+pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+	/*
+	 * Changes to the high (kernel) portion of the kernelmode page
+	 * tables are not automatically propagated to the usermode tables.
+	 *
+	 * Users should keep in mind that, unlike the kernelmode tables,
+	 * there is no vmalloc_fault equivalent for the usermode tables.
+	 * Top-level entries added to init_mm's usermode pgd after boot
+	 * will not be automatically propagated to other mms.
+	 */
+	if (!pgdp_maps_userspace(pgdp))
+		return pgd;
+
+	/*
+	 * The user page tables get the full PGD, accessible from
+	 * userspace:
+	 */
+	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;
+
+	/*
+	 * If this is normal user memory, make it NX in the kernel
+	 * pagetables so that, if we somehow screw up and return to
+	 * usermode with the kernel CR3 loaded, we'll get a page fault
+	 * instead of allowing user code to execute with the wrong CR3.
+	 *
+	 * As exceptions, we don't set NX if:
+	 *  - _PAGE_USER is not set.  This could be an executable
+	 *     EFI runtime mapping or something similar, and the kernel
+	 *     may execute from it
+	 *  - we don't have NX support
+	 *  - we're clearing the PGD (i.e. the new pgd is not present).
+	 */
+	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
+	    (__supported_pte_mask & _PAGE_NX))
+		pgd.pgd |= _PAGE_NX;
+
+	/* return the copy of the PGD we want the kernel to use: */
+	return pgd;
+}
+
+/*
+ * Walk the user copy of the page tables (optionally) trying to allocate
+ * page table pages on the way down.
+ *
+ * Returns a pointer to a P4D on success, or NULL on failure.
+ */
+static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
+{
+	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
+	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+
+	if (address < PAGE_OFFSET) {
+		WARN_ONCE(1, "attempt to walk user address\n");
+		return NULL;
+	}
+
+	if (pgd_none(*pgd)) {
+		unsigned long new_p4d_page = __get_free_page(gfp);
+		if (!new_p4d_page)
+			return NULL;
+
+		if (pgd_none(*pgd)) {
+			set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
+			new_p4d_page = 0;
+		}
+		if (new_p4d_page)
+			free_page(new_p4d_page);
+	}
+	BUILD_BUG_ON(pgd_large(*pgd) != 0);
+
+	return p4d_offset(pgd, address);
+}
+
+/*
+ * Walk the user copy of the page tables (optionally) trying to allocate
+ * page table pages on the way down.
+ *
+ * Returns a pointer to a PMD on success, or NULL on failure.
+ */
+static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
+{
+	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+	p4d_t *p4d = pti_user_pagetable_walk_p4d(address);
+	pud_t *pud;
+
+	BUILD_BUG_ON(p4d_large(*p4d) != 0);
+	if (p4d_none(*p4d)) {
+		unsigned long new_pud_page = __get_free_page(gfp);
+		if (!new_pud_page)
+			return NULL;
+
+		if (p4d_none(*p4d)) {
+			set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
+			new_pud_page = 0;
+		}
+		if (new_pud_page)
+			free_page(new_pud_page);
+	}
+
+	pud = pud_offset(p4d, address);
+	/* The user page tables do not use large mappings: */
+	if (pud_large(*pud)) {
+		WARN_ON(1);
+		return NULL;
+	}
+	if (pud_none(*pud)) {
+		unsigned long new_pmd_page = __get_free_page(gfp);
+		if (!new_pmd_page)
+			return NULL;
+
+		if (pud_none(*pud)) {
+			set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
+			new_pmd_page = 0;
+		}
+		if (new_pmd_page)
+			free_page(new_pmd_page);
+	}
+
+	return pmd_offset(pud, address);
+}
+
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
+/*
+ * Walk the shadow copy of the page tables (optionally) trying to allocate
+ * page table pages on the way down.  Does not support large pages.
+ *
+ * Note: this is only used when mapping *new* kernel data into the
+ * user/shadow page tables.  It is never used for userspace data.
+ *
+ * Returns a pointer to a PTE on success, or NULL on failure.
+ */
+static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+{
+	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+	pmd_t *pmd = pti_user_pagetable_walk_pmd(address);
+	pte_t *pte;
+
+	/* We can't do anything sensible if we hit a large mapping. */
+	if (pmd_large(*pmd)) {
+		WARN_ON(1);
+		return NULL;
+	}
+
+	if (pmd_none(*pmd)) {
+		unsigned long new_pte_page = __get_free_page(gfp);
+		if (!new_pte_page)
+			return NULL;
+
+		if (pmd_none(*pmd)) {
+			set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
+			new_pte_page = 0;
+		}
+		if (new_pte_page)
+			free_page(new_pte_page);
+	}
+
+	pte = pte_offset_kernel(pmd, address);
+	if (pte_flags(*pte) & _PAGE_USER) {
+		WARN_ONCE(1, "attempt to walk to user pte\n");
+		return NULL;
+	}
+	return pte;
+}
+
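+/*
+ * The vsyscall page must be reachable from user mode, so expose its PTE
+ * in the user page tables.
+ */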
+static void __init pti_setup_vsyscall(void)
+{
+	pte_t *pte, *target_pte;
+	unsigned int level;
+
+	pte = lookup_address(VSYSCALL_ADDR, &level);
+	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
+		return;
+
+	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
+	if (WARN_ON(!target_pte))
+		return;
+
+	*target_pte = *pte;
+	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
+}
+#else
+static void __init pti_setup_vsyscall(void) { }
+#endif
+
+static void __init
+pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
+{
+	unsigned long addr;
+
+	/*
+	 * Clone the populated PMDs which cover start to end. These PMD areas
+	 * can have holes.
+	 */
+	for (addr = start; addr < end; addr += PMD_SIZE) {
+		pmd_t *pmd, *target_pmd;
+		pgd_t *pgd;
+		p4d_t *p4d;
+		pud_t *pud;
+
+		pgd = pgd_offset_k(addr);
+		if (WARN_ON(pgd_none(*pgd)))
+			return;
+		p4d = p4d_offset(pgd, addr);
+		if (WARN_ON(p4d_none(*p4d)))
+			return;
+		pud = pud_offset(p4d, addr);
+		if (pud_none(*pud))
+			continue;
+		pmd = pmd_offset(pud, addr);
+		if (pmd_none(*pmd))
+			continue;
+
+		target_pmd = pti_user_pagetable_walk_pmd(addr);
+		if (WARN_ON(!target_pmd))
+			return;
+
+		/*
+		 * Copy the PMD.  That is, the kernelmode and usermode
+		 * tables will share the last-level page tables of this
+		 * address range
+		 */
+		*target_pmd = pmd_clear_flags(*pmd, clear);
+	}
+}
+
+/*
+ * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
+ * next-level entry on 5-level systems).
+ */
+static void __init pti_clone_p4d(unsigned long addr)
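+	/*
+	 * The XOR below has bits set within PAGE_MASK only if the start
+	 * and the end of x86_tss fall on different pages.
+	 */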
+{
+	p4d_t *kernel_p4d, *user_p4d;
+	pgd_t *kernel_pgd;
+
+	user_p4d = pti_user_pagetable_walk_p4d(addr);
+	kernel_pgd = pgd_offset_k(addr);
+	kernel_p4d = p4d_offset(kernel_pgd, addr);
+	*user_p4d = *kernel_p4d;
+}
+
+/*
+ * Clone the CPU_ENTRY_AREA into the user space visible page table.
+ */
+static void __init pti_clone_user_shared(void)
+{
+	pti_clone_p4d(CPU_ENTRY_AREA_BASE);
+}
+
+/*
+ * Clone the ESPFIX P4D into the user space visible page table.
+ */
+static void __init pti_setup_espfix64(void)
+{
+#ifdef CONFIG_X86_ESPFIX64
+	pti_clone_p4d(ESPFIX_BASE_ADDR);
+#endif
+}
+
+/*
+ * Clone the populated PMDs of the entry and irqentry text and force it RO.
+ */
+static void __init pti_clone_entry_text(void)
+{
+	pti_clone_pmds((unsigned long) __entry_text_start,
+			(unsigned long) __irqentry_text_end, _PAGE_RW);
+}
+
+/*
+ * Initialize kernel page table isolation
+ */
+void __init pti_init(void)
+{
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return;
+
+	pr_info("enabled\n");
+
+	pti_clone_user_shared();
+	pti_clone_entry_text();
+	pti_setup_espfix64();
+	pti_setup_vsyscall();
+}
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 3118392cdf756bfc913d7a4137d5f7e0d46b046d..a1561957dccbb82d188d8209d76f7eddb780519a 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -28,6 +28,38 @@
  *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
  */
 
+/*
+ * We get here when we do something requiring a TLB invalidation
+ * but could not go invalidate all of the contexts.  We do the
+ * necessary invalidation by clearing out the 'ctx_id' which
+ * forces a TLB flush when the context is loaded.
+ */
+void clear_asid_other(void)
+{
+	u16 asid;
+
+	/*
+	 * This is only expected to be set if we have disabled
+	 * kernel _PAGE_GLOBAL pages.
+	 */
+	if (!static_cpu_has(X86_FEATURE_PTI)) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
+	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
+		/* Do not need to flush the current asid */
+		if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
+			continue;
+		/*
+		 * Make sure the next time we go to switch to
+		 * this asid, we do a flush:
+		 */
+		this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
+	}
+	this_cpu_write(cpu_tlbstate.invalidate_other, false);
+}
+
 atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
 
 
@@ -42,6 +74,9 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
 		return;
 	}
 
+	if (this_cpu_read(cpu_tlbstate.invalidate_other))
+		clear_asid_other();
+
 	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
 		if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
 		    next->context.ctx_id)
@@ -65,6 +100,25 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
 	*need_flush = true;
 }
 
+static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush)
+{
+	unsigned long new_mm_cr3;
+
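+	/*
+	 * When a flush is required, also invalidate the user ASID so the
+	 * next switch to the user page tables starts with a clean TLB;
+	 * otherwise keep the cached translations via the no-flush bit.
+	 */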
+	if (need_flush) {
+		invalidate_user_asid(new_asid);
+		new_mm_cr3 = build_cr3(pgdir, new_asid);
+	} else {
+		new_mm_cr3 = build_cr3_noflush(pgdir, new_asid);
+	}
+
+	/*
+	 * Caution: many callers of this function expect
+	 * that load_cr3() is serializing and orders TLB
+	 * fills with respect to the mm_cpumask writes.
+	 */
+	write_cr3(new_mm_cr3);
+}
+
 void leave_mm(int cpu)
 {
 	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
@@ -128,7 +182,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	 * isn't free.
 	 */
 #ifdef CONFIG_DEBUG_VM
-	if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) {
+	if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) {
 		/*
 		 * If we were to BUG here, we'd be very likely to kill
 		 * the system so hard that we don't see the call trace.
@@ -195,7 +249,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		if (need_flush) {
 			this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
 			this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
-			write_cr3(build_cr3(next, new_asid));
+			load_new_mm_cr3(next->pgd, new_asid, true);
 
 			/*
 			 * NB: This gets called via leave_mm() in the idle path
@@ -208,7 +262,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 		} else {
 			/* The new ASID is already up to date. */
-			write_cr3(build_cr3_noflush(next, new_asid));
+			load_new_mm_cr3(next->pgd, new_asid, false);
 
 			/* See above wrt _rcuidle. */
 			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
@@ -288,7 +342,7 @@ void initialize_tlbstate_and_flush(void)
 		!(cr4_read_shadow() & X86_CR4_PCIDE));
 
 	/* Force ASID 0 and force a TLB flush. */
-	write_cr3(build_cr3(mm, 0));
+	write_cr3(build_cr3(mm->pgd, 0));
 
 	/* Reinitialize tlbstate. */
 	this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
@@ -551,7 +605,7 @@ static void do_kernel_range_flush(void *info)
 
 	/* flush range by one by one 'invlpg' */
 	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
-		__flush_tlb_single(addr);
+		__flush_tlb_one(addr);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 6a151ce70e865caadde95c859855c4b63283ad4b..d87ac96e37ede3ea93dabb67d99538fdadc03de0 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -196,6 +196,9 @@ static pgd_t *efi_pgd;
  * because we want to avoid inserting EFI region mappings (EFI_VA_END
  * to EFI_VA_START) into the standard kernel page tables. Everything
  * else can be shared, see efi_sync_low_kernel_mappings().
+ *
+ * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the
+ * allocation.
  */
 int __init efi_alloc_page_tables(void)
 {
@@ -208,7 +211,7 @@ int __init efi_alloc_page_tables(void)
 		return 0;
 
 	gfp_mask = GFP_KERNEL | __GFP_ZERO;
-	efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
+	efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
 	if (!efi_pgd)
 		return -ENOMEM;
 
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index f44c0bc95aa2f45ad42462a5f23f4db4672d1257..8538a6723171a5606058a8823ed1cbb2d343fdb6 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
 		local_flush_tlb();
 		stat->d_alltlb++;
 	} else {
-		__flush_tlb_one(msg->address);
+		__flush_tlb_single(msg->address);
 		stat->d_onetlb++;
 	}
 	stat->d_requestee++;
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index 5f6fd860820a3c5175609a5a3468262aac937604..e4cb9f4cde8ae224afe51ced1970805df293c956 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -128,7 +128,7 @@ static void uv_domain_free(struct irq_domain *domain, unsigned int virq,
  * on the specified blade to allow the sending of MSIs to the specified CPU.
  */
 static int uv_domain_activate(struct irq_domain *domain,
-			      struct irq_data *irq_data, bool early)
+			      struct irq_data *irq_data, bool reserve)
 {
 	uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
 	return 0;
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 36a28eddb435e72d2abc5ffbdd1e78a46b56876e..a7d966964c6f20577c927cf5e618bc86b3331977 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -152,17 +152,19 @@ static void do_fpu_end(void)
 static void fix_processor_context(void)
 {
 	int cpu = smp_processor_id();
-	struct tss_struct *t = &per_cpu(cpu_tss, cpu);
 #ifdef CONFIG_X86_64
 	struct desc_struct *desc = get_cpu_gdt_rw(cpu);
 	tss_desc tss;
 #endif
-	set_tss_desc(cpu, t);	/*
-				 * This just modifies memory; should not be
-				 * necessary. But... This is necessary, because
-				 * 386 hardware has concept of busy TSS or some
-				 * similar stupidity.
-				 */
+
+	/*
+	 * We need to reload TR, which requires that we change the
+	 * GDT entry to indicate "available" first.
+	 *
+	 * XXX: This could probably all be replaced by a call to
+	 * force_reload_TR().
+	 */
+	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
 
 #ifdef CONFIG_X86_64
 	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index d669e9d890017770456abe458f1161eb2509c09e..c9081c6671f0b7a05ecfaaf206e7e1ed2b1f456a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1,8 +1,12 @@
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+#include <linux/bootmem.h>
+#endif
 #include <linux/cpu.h>
 #include <linux/kexec.h>
 
 #include <xen/features.h>
 #include <xen/page.h>
+#include <xen/interface/memory.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -331,3 +335,80 @@ void xen_arch_unregister_cpu(int num)
 }
 EXPORT_SYMBOL(xen_arch_unregister_cpu);
 #endif
+
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+void __init arch_xen_balloon_init(struct resource *hostmem_resource)
+{
+	struct xen_memory_map memmap;
+	int rc;
+	unsigned int i, last_guest_ram;
+	phys_addr_t max_addr = PFN_PHYS(max_pfn);
+	struct e820_table *xen_e820_table;
+	const struct e820_entry *entry;
+	struct resource *res;
+
+	if (!xen_initial_domain())
+		return;
+
+	xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL);
+	if (!xen_e820_table)
+		return;
+
+	memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries);
+	set_xen_guest_handle(memmap.buffer, xen_e820_table->entries);
+	rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
+	if (rc) {
+		pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc);
+		goto out;
+	}
+
+	last_guest_ram = 0;
+	for (i = 0; i < memmap.nr_entries; i++) {
+		if (xen_e820_table->entries[i].addr >= max_addr)
+			break;
+		if (xen_e820_table->entries[i].type == E820_TYPE_RAM)
+			last_guest_ram = i;
+	}
+
+	entry = &xen_e820_table->entries[last_guest_ram];
+	if (max_addr >= entry->addr + entry->size)
+		goto out; /* No unallocated host RAM. */
+
+	hostmem_resource->start = max_addr;
+	hostmem_resource->end = entry->addr + entry->size;
+
+	/*
+	 * Mark non-RAM regions between the end of dom0 RAM and end of host RAM
+	 * as unavailable. The rest of that region can be used for hotplug-based
+	 * ballooning.
+	 */
+	for (; i < memmap.nr_entries; i++) {
+		entry = &xen_e820_table->entries[i];
+
+		if (entry->type == E820_TYPE_RAM)
+			continue;
+
+		if (entry->addr >= hostmem_resource->end)
+			break;
+
+		res = kzalloc(sizeof(*res), GFP_KERNEL);
+		if (!res)
+			goto out;
+
+		res->name = "Unavailable host RAM";
+		res->start = entry->addr;
+		res->end = (entry->addr + entry->size < hostmem_resource->end) ?
+			    entry->addr + entry->size : hostmem_resource->end;
+		rc = insert_resource(hostmem_resource, res);
+		if (rc) {
+			pr_warn("%s: Can't insert [%llx - %llx) (%d)\n",
+				__func__, res->start, res->end, rc);
+			kfree(res);
+			goto out;
+		}
+	}
+
+ out:
+	kfree(xen_e820_table);
+}
+#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index f2414c6c5e7c455b43fc45773fbd1264cf86c24e..c047f42552e1a61ed0a5787d904681974cc05af1 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -88,6 +88,8 @@
 #include "multicalls.h"
 #include "pmu.h"
 
+#include "../kernel/cpu/cpu.h" /* get_cpu_cap() */
+
 void *xen_initial_gdt;
 
 static int xen_cpu_up_prepare_pv(unsigned int cpu);
@@ -826,7 +828,7 @@ static void xen_load_sp0(unsigned long sp0)
 	mcs = xen_mc_entry(0);
 	MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
 	xen_mc_issue(PARAVIRT_LAZY_CPU);
-	this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
+	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
 void xen_set_iopl_mask(unsigned mask)
@@ -1258,6 +1260,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
 	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
 
 	/* Work out if we support NX */
+	get_cpu_cap(&boot_cpu_data);
 	x86_configure_nx();
 
 	/* Get mfn list */
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index fc048ec686e7699b263254c79b482ccf935c21ef..4d62c071b166f65c848a12ca07bfe44ca20e198a 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1902,6 +1902,18 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 	/* Graft it onto L4[511][510] */
 	copy_page(level2_kernel_pgt, l2);
 
+	/*
+	 * Zap execute permission from the ident map. Due to the sharing of
+	 * L1 entries we need to do this in the L2.
+	 */
+	if (__supported_pte_mask & _PAGE_NX) {
+		for (i = 0; i < PTRS_PER_PMD; ++i) {
+			if (pmd_none(level2_ident_pgt[i]))
+				continue;
+			level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX);
+		}
+	}
+
 	/* Copy the initial P->M table mappings if necessary. */
 	i = pgd_index(xen_start_info->mfn_list);
 	if (i && i < pgd_index(__START_KERNEL_map))
@@ -2261,7 +2273,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 
 	switch (idx) {
 	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
-	case FIX_RO_IDT:
 #ifdef CONFIG_X86_32
 	case FIX_WP_TEST:
 # ifdef CONFIG_HIGHMEM
@@ -2272,7 +2283,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
 	case FIX_TEXT_POKE0:
 	case FIX_TEXT_POKE1:
-	case FIX_GDT_REMAP_BEGIN ... FIX_GDT_REMAP_END:
 		/* All local page mappings */
 		pte = pfn_pte(phys, prot);
 		break;
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index c114ca767b3b8a382918e2b0160983fa257318db..6e0d2086eacbf37326467b5142e59750151a5328 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -808,7 +808,6 @@ char * __init xen_memory_setup(void)
 	addr = xen_e820_table.entries[0].addr;
 	size = xen_e820_table.entries[0].size;
 	while (i < xen_e820_table.nr_entries) {
-		bool discard = false;
 
 		chunk_size = size;
 		type = xen_e820_table.entries[i].type;
@@ -824,11 +823,10 @@ char * __init xen_memory_setup(void)
 				xen_add_extra_mem(pfn_s, n_pfns);
 				xen_max_p2m_pfn = pfn_s + n_pfns;
 			} else
-				discard = true;
+				type = E820_TYPE_UNUSABLE;
 		}
 
-		if (!discard)
-			xen_align_and_add_e820_region(addr, chunk_size, type);
+		xen_align_and_add_e820_region(addr, chunk_size, type);
 
 		addr += chunk_size;
 		size -= chunk_size;
diff --git a/block/bio.c b/block/bio.c
index 8bfdea58159ba9ffd972dd95717e0eee99101e0a..9ef6cf3addb38cae822d0e5c5ef18ba9e98cd2d7 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -599,6 +599,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
 	bio->bi_disk = bio_src->bi_disk;
 	bio->bi_partno = bio_src->bi_partno;
 	bio_set_flag(bio, BIO_CLONED);
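+	/*
+	 * Carry BIO_THROTTLED over to the clone so blk-throttle does not
+	 * charge the same I/O twice.
+	 */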
+	if (bio_flagged(bio_src, BIO_THROTTLED))
+		bio_set_flag(bio, BIO_THROTTLED);
 	bio->bi_opf = bio_src->bi_opf;
 	bio->bi_write_hint = bio_src->bi_write_hint;
 	bio->bi_iter = bio_src->bi_iter;
diff --git a/block/blk-map.c b/block/blk-map.c
index b21f8e86f1207f9b76bf3e2083fcf72b5062f0b7..d3a94719f03fb2af81d6270d6fc9ed58f0dde373 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -12,22 +12,29 @@
 #include "blk.h"
 
 /*
- * Append a bio to a passthrough request.  Only works can be merged into
- * the request based on the driver constraints.
+ * Append a bio to a passthrough request.  Only works if the bio can be merged
+ * into the request based on the driver constraints.
  */
-int blk_rq_append_bio(struct request *rq, struct bio *bio)
+int blk_rq_append_bio(struct request *rq, struct bio **bio)
 {
-	blk_queue_bounce(rq->q, &bio);
+	struct bio *orig_bio = *bio;
+
+	blk_queue_bounce(rq->q, bio);
 
 	if (!rq->bio) {
-		blk_rq_bio_prep(rq->q, rq, bio);
+		blk_rq_bio_prep(rq->q, rq, *bio);
 	} else {
-		if (!ll_back_merge_fn(rq->q, rq, bio))
+		if (!ll_back_merge_fn(rq->q, rq, *bio)) {
+			if (orig_bio != *bio) {
+				bio_put(*bio);
+				*bio = orig_bio;
+			}
 			return -EINVAL;
+		}
 
-		rq->biotail->bi_next = bio;
-		rq->biotail = bio;
-		rq->__data_len += bio->bi_iter.bi_size;
+		rq->biotail->bi_next = *bio;
+		rq->biotail = *bio;
+		rq->__data_len += (*bio)->bi_iter.bi_size;
 	}
 
 	return 0;
@@ -73,14 +80,12 @@ static int __blk_rq_map_user_iov(struct request *rq,
 	 * We link the bounce buffer in and could have to traverse it
 	 * later so we have to get a ref to prevent it from being freed
 	 */
-	ret = blk_rq_append_bio(rq, bio);
-	bio_get(bio);
+	ret = blk_rq_append_bio(rq, &bio);
 	if (ret) {
-		bio_endio(bio);
 		__blk_rq_unmap_user(orig_bio);
-		bio_put(bio);
 		return ret;
 	}
+	bio_get(bio);
 
 	return 0;
 }
@@ -213,7 +218,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	int reading = rq_data_dir(rq) == READ;
 	unsigned long addr = (unsigned long) kbuf;
 	int do_copy = 0;
-	struct bio *bio;
+	struct bio *bio, *orig_bio;
 	int ret;
 
 	if (len > (queue_max_hw_sectors(q) << 9))
@@ -236,10 +241,11 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	if (do_copy)
 		rq->rq_flags |= RQF_COPY_USER;
 
-	ret = blk_rq_append_bio(rq, bio);
+	orig_bio = bio;
+	ret = blk_rq_append_bio(rq, &bio);
 	if (unlikely(ret)) {
 		/* request is too big */
-		bio_put(bio);
+		bio_put(orig_bio);
 		return ret;
 	}
 
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 825bc29767e6699ac85675d319a9866b70cc9b84..d19f416d61012ac032c49608f0afe463c948e8bc 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2226,13 +2226,7 @@ again:
 out_unlock:
 	spin_unlock_irq(q->queue_lock);
 out:
-	/*
-	 * As multiple blk-throtls may stack in the same issue path, we
-	 * don't want bios to leave with the flag set.  Clear the flag if
-	 * being issued.
-	 */
-	if (!throttled)
-		bio_clear_flag(bio, BIO_THROTTLED);
+	bio_set_flag(bio, BIO_THROTTLED);
 
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
 	if (throttled || !td->track_bio_latency)
diff --git a/block/bounce.c b/block/bounce.c
index fceb1a96480bfb9600e4664fa2b4992c8bb64210..1d05c422c932ad56d705f94deed6cce0891ff9d3 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -200,6 +200,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	unsigned i = 0;
 	bool bounce = false;
 	int sectors = 0;
+	bool passthrough = bio_is_passthrough(*bio_orig);
 
 	bio_for_each_segment(from, *bio_orig, iter) {
 		if (i++ < BIO_MAX_PAGES)
@@ -210,13 +211,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	if (!bounce)
 		return;
 
-	if (sectors < bio_sectors(*bio_orig)) {
+	if (!passthrough && sectors < bio_sectors(*bio_orig)) {
 		bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split);
 		bio_chain(bio, *bio_orig);
 		generic_make_request(*bio_orig);
 		*bio_orig = bio;
 	}
-	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, bounce_bio_set);
+	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, passthrough ? NULL :
+			bounce_bio_set);
 
 	bio_for_each_segment_all(to, bio, i) {
 		struct page *page = to->bv_page;
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index b4df317c291692f01138b91608dc6c80f71bb9aa..f95c60774ce8ca613417d3ccf54bee52010752ee 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -100,9 +100,13 @@ struct kyber_hctx_data {
 	unsigned int cur_domain;
 	unsigned int batching;
 	wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS];
+	struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
 	atomic_t wait_index[KYBER_NUM_DOMAINS];
 };
 
+static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
+			     void *key);
+
 static int rq_sched_domain(const struct request *rq)
 {
 	unsigned int op = rq->cmd_flags;
@@ -385,6 +389,9 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 
 	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
 		INIT_LIST_HEAD(&khd->rqs[i]);
+		init_waitqueue_func_entry(&khd->domain_wait[i],
+					  kyber_domain_wake);
+		khd->domain_wait[i].private = hctx;
 		INIT_LIST_HEAD(&khd->domain_wait[i].entry);
 		atomic_set(&khd->wait_index[i], 0);
 	}
@@ -524,35 +531,39 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
 	int nr;
 
 	nr = __sbitmap_queue_get(domain_tokens);
-	if (nr >= 0)
-		return nr;
 
 	/*
 	 * If we failed to get a domain token, make sure the hardware queue is
 	 * run when one becomes available. Note that this is serialized on
 	 * khd->lock, but we still need to be careful about the waker.
 	 */
-	if (list_empty_careful(&wait->entry)) {
-		init_waitqueue_func_entry(wait, kyber_domain_wake);
-		wait->private = hctx;
+	if (nr < 0 && list_empty_careful(&wait->entry)) {
 		ws = sbq_wait_ptr(domain_tokens,
 				  &khd->wait_index[sched_domain]);
+		khd->domain_ws[sched_domain] = ws;
 		add_wait_queue(&ws->wait, wait);
 
 		/*
 		 * Try again in case a token was freed before we got on the wait
-		 * queue. The waker may have already removed the entry from the
-		 * wait queue, but list_del_init() is okay with that.
+		 * queue.
 		 */
 		nr = __sbitmap_queue_get(domain_tokens);
-		if (nr >= 0) {
-			unsigned long flags;
+	}
 
-			spin_lock_irqsave(&ws->wait.lock, flags);
-			list_del_init(&wait->entry);
-			spin_unlock_irqrestore(&ws->wait.lock, flags);
-		}
+	/*
+	 * If we got a token while we were on the wait queue, remove ourselves
+	 * from the wait queue to ensure that all wake ups make forward
+	 * progress. It's possible that the waker already deleted the entry
+	 * between the !list_empty_careful() check and us grabbing the lock, but
+	 * list_del_init() is okay with that.
+	 */
+	if (nr >= 0 && !list_empty_careful(&wait->entry)) {
+		ws = khd->domain_ws[sched_domain];
+		spin_lock_irq(&ws->wait.lock);
+		list_del_init(&wait->entry);
+		spin_unlock_irq(&ws->wait.lock);
 	}
+
 	return nr;
 }
 
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 415a54ced4d6a490ae1e09170c8b80ef3eef135e..444a387df219e96a35fb7972f2a1a810e013a60b 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -1138,12 +1138,6 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
 		if (!af_alg_readable(sk))
 			break;
 
-		if (!ctx->used) {
-			err = af_alg_wait_for_data(sk, flags);
-			if (err)
-				return err;
-		}
-
 		seglen = min_t(size_t, (maxsize - len),
 			       msg_data_left(msg));
 
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 48b34e9c68342c55610ad83900557dc1c785af41..ddcc45f77edd367bf118e46aa757891c5c3d8869 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -111,6 +111,12 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 	size_t usedpages = 0;		/* [in]  RX bufs to be used from user */
 	size_t processed = 0;		/* [in]  TX bufs to be consumed */
 
+	if (!ctx->used) {
+		err = af_alg_wait_for_data(sk, flags);
+		if (err)
+			return err;
+	}
+
 	/*
 	 * Data length provided by caller via sendmsg/sendpage that has not
 	 * yet been processed.
@@ -285,6 +291,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 		/* AIO operation */
 		sock_hold(sk);
 		areq->iocb = msg->msg_iocb;
+
+		/* Remember output size that will be generated. */
+		areq->outlen = outlen;
+
 		aead_request_set_callback(&areq->cra_u.aead_req,
 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
 					  af_alg_async_cb, areq);
@@ -292,12 +302,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 				 crypto_aead_decrypt(&areq->cra_u.aead_req);
 
 		/* AIO operation in progress */
-		if (err == -EINPROGRESS || err == -EBUSY) {
-			/* Remember output size that will be generated. */
-			areq->outlen = outlen;
-
+		if (err == -EINPROGRESS || err == -EBUSY)
 			return -EIOCBQUEUED;
-		}
 
 		sock_put(sk);
 	} else {
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 30cff827dd8fff048fa3e2ca7de770ab73022749..baef9bfccddaa94728bea5933bea16c32b2a32b5 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -72,6 +72,12 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 	int err = 0;
 	size_t len = 0;
 
+	if (!ctx->used) {
+		err = af_alg_wait_for_data(sk, flags);
+		if (err)
+			return err;
+	}
+
 	/* Allocate cipher request for current operation. */
 	areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
 				     crypto_skcipher_reqsize(tfm));
@@ -119,6 +125,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 		/* AIO operation */
 		sock_hold(sk);
 		areq->iocb = msg->msg_iocb;
+
+		/* Remember output size that will be generated. */
+		areq->outlen = len;
+
 		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
 					      CRYPTO_TFM_REQ_MAY_SLEEP,
 					      af_alg_async_cb, areq);
@@ -127,12 +137,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
 
 		/* AIO operation in progress */
-		if (err == -EINPROGRESS || err == -EBUSY) {
-			/* Remember output size that will be generated. */
-			areq->outlen = len;
-
+		if (err == -EINPROGRESS || err == -EBUSY)
 			return -EIOCBQUEUED;
-		}
 
 		sock_put(sk);
 	} else {
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index 4e64726588524f137acd590809bef11673695ed2..eca04d3729b37c696c2dac4b0ac472422f30615d 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -81,6 +81,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue,
 		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
+		spin_lock_init(&cpu_queue->q_lock);
 	}
 	return 0;
 }
@@ -104,15 +105,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
 	int cpu, err;
 	struct mcryptd_cpu_queue *cpu_queue;
 
-	cpu = get_cpu();
-	cpu_queue = this_cpu_ptr(queue->cpu_queue);
-	rctx->tag.cpu = cpu;
+	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
+	spin_lock(&cpu_queue->q_lock);
+	cpu = smp_processor_id();
+	rctx->tag.cpu = smp_processor_id();
 
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
 		 cpu, cpu_queue, request);
+	spin_unlock(&cpu_queue->q_lock);
 	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
-	put_cpu();
 
 	return err;
 }
@@ -161,16 +163,11 @@ static void mcryptd_queue_worker(struct work_struct *work)
 	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
 	i = 0;
 	while (i < MCRYPTD_BATCH || single_task_running()) {
-		/*
-		 * preempt_disable/enable is used to prevent
-		 * being preempted by mcryptd_enqueue_request()
-		 */
-		local_bh_disable();
-		preempt_disable();
+
+		spin_lock_bh(&cpu_queue->q_lock);
 		backlog = crypto_get_backlog(&cpu_queue->queue);
 		req = crypto_dequeue_request(&cpu_queue->queue);
-		preempt_enable();
-		local_bh_enable();
+		spin_unlock_bh(&cpu_queue->q_lock);
 
 		if (!req) {
 			mcryptd_opportunistic_flush();
@@ -185,7 +182,7 @@ static void mcryptd_queue_worker(struct work_struct *work)
 		++i;
 	}
 	if (cpu_queue->queue.qlen)
-		queue_work(kcrypto_wq, &cpu_queue->work);
+		queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
 }
 
 void mcryptd_flusher(struct work_struct *__work)
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 778e0ff42bfa801eda5be848da9e6747ebbc2626..11af5fd6a443570550e1dac5b0a429b2cae801b1 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -449,6 +449,8 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
 
 	walk->total = req->cryptlen;
 	walk->nbytes = 0;
+	walk->iv = req->iv;
+	walk->oiv = req->iv;
 
 	if (unlikely(!walk->total))
 		return 0;
@@ -456,9 +458,6 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
 	scatterwalk_start(&walk->in, req->src);
 	scatterwalk_start(&walk->out, req->dst);
 
-	walk->iv = req->iv;
-	walk->oiv = req->iv;
-
 	walk->flags &= ~SKCIPHER_WALK_SLEEP;
 	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 		       SKCIPHER_WALK_SLEEP : 0;
@@ -510,6 +509,8 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
 	int err;
 
 	walk->nbytes = 0;
+	walk->iv = req->iv;
+	walk->oiv = req->iv;
 
 	if (unlikely(!walk->total))
 		return 0;
@@ -525,9 +526,6 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
 	scatterwalk_done(&walk->in, 0, walk->total);
 	scatterwalk_done(&walk->out, 0, walk->total);
 
-	walk->iv = req->iv;
-	walk->oiv = req->iv;
-
 	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
 		walk->flags |= SKCIPHER_WALK_SLEEP;
 	else
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 6742f6c68034c5e833505d294902dd97c274c1b0..9bff853e85f37831d8d053a2aa363f139537c9b5 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -1007,7 +1007,7 @@ skip:
 	/* The record may be cleared by others, try read next record */
 	if (len == -ENOENT)
 		goto skip;
-	else if (len < sizeof(*rcd)) {
+	else if (len < 0 || len < sizeof(*rcd)) {
 		rc = -EIO;
 		goto out;
 	}
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 30e84cc600ae6438c25aec2f2975ae4e3f144553..06ea4749ebd9826a3d7b8b0a9798a1cc797f4d61 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1171,7 +1171,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
 	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
 	struct cpc_register_resource *desired_reg;
 	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
-	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
+	struct cppc_pcc_data *pcc_ss_data;
 	int ret = 0;
 
 	if (!cpc_desc || pcc_ss_id < 0) {
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index ff2580e7611d18c6d56c58d50c2cbc3a2d54aa36..abeb4df4f22e43d7f0d1398af9962135a37af4b6 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1670,6 +1670,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
 				dev_name(&adev_dimm->dev));
 		return -ENXIO;
 	}
+	/*
+	 * Record nfit_mem for the notification path to track back to
+	 * the nfit sysfs attributes for this dimm device object.
+	 */
+	dev_set_drvdata(&adev_dimm->dev, nfit_mem);
 
 	/*
 	 * Until standardization materializes we need to consider 4
@@ -1752,9 +1757,11 @@ static void shutdown_dimm_notify(void *data)
 			sysfs_put(nfit_mem->flags_attr);
 			nfit_mem->flags_attr = NULL;
 		}
-		if (adev_dimm)
+		if (adev_dimm) {
 			acpi_remove_notify_handler(adev_dimm->handle,
 					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
+			dev_set_drvdata(&adev_dimm->dev, NULL);
+		}
 	}
 	mutex_unlock(&acpi_desc->init_mutex);
 }
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index bccec9de05330b2fe6822369e5c7a409e8759e95..a7ecfde66b7b34f44cc9cffeac0be1b703e29aff 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -482,7 +482,8 @@ enum binder_deferred_state {
  * @tsk                   task_struct for group_leader of process
  *                        (invariant after initialized)
  * @files                 files_struct for process
- *                        (invariant after initialized)
+ *                        (protected by @files_lock)
+ * @files_lock            mutex to protect @files
  * @deferred_work_node:   element for binder_deferred_list
  *                        (protected by binder_deferred_lock)
  * @deferred_work:        bitmap of deferred work to perform
@@ -530,6 +531,7 @@ struct binder_proc {
 	int pid;
 	struct task_struct *tsk;
 	struct files_struct *files;
+	struct mutex files_lock;
 	struct hlist_node deferred_work_node;
 	int deferred_work;
 	bool is_dead;
@@ -877,20 +879,26 @@ static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
 
 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
 {
-	struct files_struct *files = proc->files;
 	unsigned long rlim_cur;
 	unsigned long irqs;
+	int ret;
 
-	if (files == NULL)
-		return -ESRCH;
-
-	if (!lock_task_sighand(proc->tsk, &irqs))
-		return -EMFILE;
-
+	mutex_lock(&proc->files_lock);
+	if (proc->files == NULL) {
+		ret = -ESRCH;
+		goto err;
+	}
+	if (!lock_task_sighand(proc->tsk, &irqs)) {
+		ret = -EMFILE;
+		goto err;
+	}
 	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
 	unlock_task_sighand(proc->tsk, &irqs);
 
-	return __alloc_fd(files, 0, rlim_cur, flags);
+	ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
+err:
+	mutex_unlock(&proc->files_lock);
+	return ret;
 }
 
 /*
@@ -899,8 +907,10 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
 static void task_fd_install(
 	struct binder_proc *proc, unsigned int fd, struct file *file)
 {
+	mutex_lock(&proc->files_lock);
 	if (proc->files)
 		__fd_install(proc->files, fd, file);
+	mutex_unlock(&proc->files_lock);
 }
 
 /*
@@ -910,9 +920,11 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
 {
 	int retval;
 
-	if (proc->files == NULL)
-		return -ESRCH;
-
+	mutex_lock(&proc->files_lock);
+	if (proc->files == NULL) {
+		retval = -ESRCH;
+		goto err;
+	}
 	retval = __close_fd(proc->files, fd);
 	/* can't restart close syscall because file table entry was cleared */
 	if (unlikely(retval == -ERESTARTSYS ||
@@ -920,7 +932,8 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
 		     retval == -ERESTARTNOHAND ||
 		     retval == -ERESTART_RESTARTBLOCK))
 		retval = -EINTR;
-
+err:
+	mutex_unlock(&proc->files_lock);
 	return retval;
 }
 
@@ -4627,7 +4640,9 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
 	if (ret)
 		return ret;
+	mutex_lock(&proc->files_lock);
 	proc->files = get_files_struct(current);
+	mutex_unlock(&proc->files_lock);
 	return 0;
 
 err_bad_arg:
@@ -4651,6 +4666,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	spin_lock_init(&proc->outer_lock);
 	get_task_struct(current->group_leader);
 	proc->tsk = current->group_leader;
+	mutex_init(&proc->files_lock);
 	INIT_LIST_HEAD(&proc->todo);
 	proc->default_priority = task_nice(current);
 	binder_dev = container_of(filp->private_data, struct binder_device,
@@ -4903,9 +4919,11 @@ static void binder_deferred_func(struct work_struct *work)
 
 		files = NULL;
 		if (defer & BINDER_DEFERRED_PUT_FILES) {
+			mutex_lock(&proc->files_lock);
 			files = proc->files;
 			if (files)
 				proc->files = NULL;
+			mutex_unlock(&proc->files_lock);
 		}
 
 		if (defer & BINDER_DEFERRED_FLUSH)
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index eb3af2739537a8def39259206e2345a2adc72c71..07532d83be0bca7d06aa5e99670ac920435e0ed9 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -186,6 +186,11 @@ static void cache_associativity(struct cacheinfo *this_leaf)
 		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
 }
 
+static bool cache_node_is_unified(struct cacheinfo *this_leaf)
+{
+	return of_property_read_bool(this_leaf->of_node, "cache-unified");
+}
+
 static void cache_of_override_properties(unsigned int cpu)
 {
 	int index;
@@ -194,6 +199,14 @@ static void cache_of_override_properties(unsigned int cpu)
 
 	for (index = 0; index < cache_leaves(cpu); index++) {
 		this_leaf = this_cpu_ci->info_list + index;
+		/*
+		 * init_cache_level must setup the cache level correctly
+		 * overriding the architecturally specified levels, so
+		 * if type is NONE at this stage, it should be unified
+		 */
+		if (this_leaf->type == CACHE_TYPE_NOCACHE &&
+		    cache_node_is_unified(this_leaf))
+			this_leaf->type = CACHE_TYPE_UNIFIED;
 		cache_size(this_leaf);
 		cache_get_line_size(this_leaf);
 		cache_nr_sets(this_leaf);
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index ccb9975a97fa3f214d658776450ab618bae26643..ad0477ae820f040affe54f4368d3a02d9da63350 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -35,13 +35,13 @@ static inline u64 mb_per_tick(int mbps)
 struct nullb_cmd {
 	struct list_head list;
 	struct llist_node ll_list;
-	call_single_data_t csd;
+	struct __call_single_data csd;
 	struct request *rq;
 	struct bio *bio;
 	unsigned int tag;
+	blk_status_t error;
 	struct nullb_queue *nq;
 	struct hrtimer timer;
-	blk_status_t error;
 };
 
 struct nullb_queue {
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 647d056df88c8dd2a7d8288e35fa2eeba9b7705b..b56c11f51bafad32bafc7b5b9c7467e7cd16632b 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -220,7 +220,8 @@ static bool clk_core_is_enabled(struct clk_core *core)
 
 	ret = core->ops->is_enabled(core->hw);
 done:
-	clk_pm_runtime_put(core);
+	if (core->dev)
+		pm_runtime_put(core->dev);
 
 	return ret;
 }
@@ -1564,6 +1565,9 @@ static void clk_change_rate(struct clk_core *core)
 		best_parent_rate = core->parent->rate;
 	}
 
+	if (clk_pm_runtime_get(core))
+		return;
+
 	if (core->flags & CLK_SET_RATE_UNGATE) {
 		unsigned long flags;
 
@@ -1634,6 +1638,8 @@ static void clk_change_rate(struct clk_core *core)
 	/* handle the new child who might not be in core->children yet */
 	if (core->new_child)
 		clk_change_rate(core->new_child);
+
+	clk_pm_runtime_put(core);
 }
 
 static int clk_core_set_rate_nolock(struct clk_core *core,
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index a1a634253d6f2299bfad888b2fa193c98b4ac019..f00d8758ba24f6e5ed537a88be76ff85e5a7c5e4 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -16,6 +16,7 @@
 
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
+#include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -83,9 +84,20 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
 	return 0;
 }
 
+static int sun9i_mmc_reset_reset(struct reset_controller_dev *rcdev,
+				 unsigned long id)
+{
+	sun9i_mmc_reset_assert(rcdev, id);
+	udelay(10);
+	sun9i_mmc_reset_deassert(rcdev, id);
+
+	return 0;
+}
+
 static const struct reset_control_ops sun9i_mmc_reset_ops = {
 	.assert		= sun9i_mmc_reset_assert,
 	.deassert	= sun9i_mmc_reset_deassert,
+	.reset		= sun9i_mmc_reset_reset,
 };
 
 static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 58d4f4e1ad6a907991873a03027e6c7aa2f31fc4..ca38229b045ab288a2f250dddaf1b174e8c0572f 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -22,6 +22,8 @@
 
 #include "cpufreq_governor.h"
 
+#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL	(2 * TICK_NSEC / NSEC_PER_USEC)
+
 static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);
 
 static DEFINE_MUTEX(gov_dbs_data_mutex);
@@ -47,11 +49,15 @@ ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
 {
 	struct dbs_data *dbs_data = to_dbs_data(attr_set);
 	struct policy_dbs_info *policy_dbs;
+	unsigned int sampling_interval;
 	int ret;
-	ret = sscanf(buf, "%u", &dbs_data->sampling_rate);
-	if (ret != 1)
+
+	ret = sscanf(buf, "%u", &sampling_interval);
+	if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
 		return -EINVAL;
 
+	dbs_data->sampling_rate = sampling_interval;
+
 	/*
 	 * We are operating under dbs_data->mutex and so the list and its
 	 * entries can't be freed concurrently.
@@ -430,7 +436,14 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
 	if (ret)
 		goto free_policy_dbs_info;
 
-	dbs_data->sampling_rate = cpufreq_policy_transition_delay_us(policy);
+	/*
+	 * The sampling interval should not be less than the transition latency
+	 * of the CPU and it also cannot be too small for dbs_update() to work
+	 * correctly.
+	 */
+	dbs_data->sampling_rate = max_t(unsigned int,
+					CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
+					cpufreq_policy_transition_delay_us(policy));
 
 	if (!have_governor_per_policy())
 		gov->gdbs_data = dbs_data;
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 628fe899cb483da9dbf0f7661b537734bc82f784..d9b2c2de49c43f125c91b382f818ff81d0ffc6ac 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -226,17 +226,18 @@ static void imx6q_opp_check_speed_grading(struct device *dev)
 	val >>= OCOTP_CFG3_SPEED_SHIFT;
 	val &= 0x3;
 
-	if ((val != OCOTP_CFG3_SPEED_1P2GHZ) &&
-	     of_machine_is_compatible("fsl,imx6q"))
-		if (dev_pm_opp_disable(dev, 1200000000))
-			dev_warn(dev, "failed to disable 1.2GHz OPP\n");
 	if (val < OCOTP_CFG3_SPEED_996MHZ)
 		if (dev_pm_opp_disable(dev, 996000000))
 			dev_warn(dev, "failed to disable 996MHz OPP\n");
-	if (of_machine_is_compatible("fsl,imx6q")) {
+
+	if (of_machine_is_compatible("fsl,imx6q") ||
+	    of_machine_is_compatible("fsl,imx6qp")) {
 		if (val != OCOTP_CFG3_SPEED_852MHZ)
 			if (dev_pm_opp_disable(dev, 852000000))
 				dev_warn(dev, "failed to disable 852MHz OPP\n");
+		if (val != OCOTP_CFG3_SPEED_1P2GHZ)
+			if (dev_pm_opp_disable(dev, 1200000000))
+				dev_warn(dev, "failed to disable 1.2GHz OPP\n");
 	}
 	iounmap(base);
 put_node:
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index dfcf56ee3c6181fc64d0a074e2f5659fec35a6d8..76861a00bb92c4b449f346433f282da8e6cb82c9 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -522,6 +522,7 @@ static struct of_device_id const bcm_kona_gpio_of_match[] = {
  * category than their parents, so it won't report false recursion.
  */
 static struct lock_class_key gpio_lock_class;
+static struct lock_class_key gpio_request_class;
 
 static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq,
 				 irq_hw_number_t hwirq)
@@ -531,7 +532,7 @@ static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq,
 	ret = irq_set_chip_data(irq, d->host_data);
 	if (ret < 0)
 		return ret;
-	irq_set_lockdep_class(irq, &gpio_lock_class);
+	irq_set_lockdep_class(irq, &gpio_lock_class, &gpio_request_class);
 	irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip, handle_simple_irq);
 	irq_set_noprobe(irq);
 
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 545d43a587b7ef1308dc827ffbea2c2dd733b478..bb4f8cf18bd9f6c7e47328aaf2ec1cdd3dfc3d6b 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -327,6 +327,7 @@ static struct brcmstb_gpio_bank *brcmstb_gpio_hwirq_to_bank(
  * category than their parents, so it won't report false recursion.
  */
 static struct lock_class_key brcmstb_gpio_irq_lock_class;
+static struct lock_class_key brcmstb_gpio_irq_request_class;
 
 
 static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
@@ -346,7 +347,8 @@ static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
 	ret = irq_set_chip_data(irq, &bank->gc);
 	if (ret < 0)
 		return ret;
-	irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class);
+	irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class,
+			      &brcmstb_gpio_irq_request_class);
 	irq_set_chip_and_handler(irq, &priv->irq_chip, handle_level_irq);
 	irq_set_noprobe(irq);
 	return 0;
diff --git a/drivers/gpio/gpio-reg.c b/drivers/gpio/gpio-reg.c
index 23e771dba4c17ab7a497feb37c108f15fe117c79..e85903eddc68ecd5930df5ded45efbc398aa57ca 100644
--- a/drivers/gpio/gpio-reg.c
+++ b/drivers/gpio/gpio-reg.c
@@ -103,8 +103,8 @@ static int gpio_reg_to_irq(struct gpio_chip *gc, unsigned offset)
 	struct gpio_reg *r = to_gpio_reg(gc);
 	int irq = r->irqs[offset];
 
-	if (irq >= 0 && r->irq.domain)
-		irq = irq_find_mapping(r->irq.domain, irq);
+	if (irq >= 0 && r->irqdomain)
+		irq = irq_find_mapping(r->irqdomain, irq);
 
 	return irq;
 }
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 8db47f671708752dbaae420478935f1613847dd3..02fa8fe2292a13608e332d6247e9c0b55870ae98 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -565,6 +565,7 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
  * than their parents, so it won't report false recursion.
  */
 static struct lock_class_key gpio_lock_class;
+static struct lock_class_key gpio_request_class;
 
 static int tegra_gpio_probe(struct platform_device *pdev)
 {
@@ -670,7 +671,8 @@ static int tegra_gpio_probe(struct platform_device *pdev)
 
 		bank = &tgi->bank_info[GPIO_BANK(gpio)];
 
-		irq_set_lockdep_class(irq, &gpio_lock_class);
+		irq_set_lockdep_class(irq, &gpio_lock_class,
+				      &gpio_request_class);
 		irq_set_chip_data(irq, bank);
 		irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
 	}
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 2313af82fad3d4bb3cf7027bec9f25d9e6b5887c..acd59113e08b9cda0fce6930981520a481ec7203 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -139,7 +139,7 @@ static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio)
 
 static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
 					 struct irq_data *irq_data,
-					 bool early)
+					 bool reserve)
 {
 	struct xgene_gpio_sb *priv = d->host_data;
 	u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index eb4528c87c0b3977420a2108c7feaaf9b2a95869..d6f3d9ee1350e422e3f373427d55d6ce8b6be565 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1074,7 +1074,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
 	}
 
 	if (!chip->names)
-		devprop_gpiochip_set_names(chip);
+		devprop_gpiochip_set_names(chip, dev_fwnode(chip->parent));
 
 	acpi_gpiochip_request_regions(acpi_gpio);
 	acpi_gpiochip_scan_gpios(acpi_gpio);
diff --git a/drivers/gpio/gpiolib-devprop.c b/drivers/gpio/gpiolib-devprop.c
index 27f383bda7d9621322a3340d9def92354d21d7a3..f748aa3e77f72000b357718d5c955abba38c5c07 100644
--- a/drivers/gpio/gpiolib-devprop.c
+++ b/drivers/gpio/gpiolib-devprop.c
@@ -19,30 +19,27 @@
 /**
  * devprop_gpiochip_set_names - Set GPIO line names using device properties
  * @chip: GPIO chip whose lines should be named, if possible
+ * @fwnode: Property Node containing the gpio-line-names property
  *
  * Looks for device property "gpio-line-names" and if it exists assigns
  * GPIO line names for the chip. The memory allocated for the assigned
  * names belong to the underlying firmware node and should not be released
  * by the caller.
  */
-void devprop_gpiochip_set_names(struct gpio_chip *chip)
+void devprop_gpiochip_set_names(struct gpio_chip *chip,
+				const struct fwnode_handle *fwnode)
 {
 	struct gpio_device *gdev = chip->gpiodev;
 	const char **names;
 	int ret, i;
 
-	if (!chip->parent) {
-		dev_warn(&gdev->dev, "GPIO chip parent is NULL\n");
-		return;
-	}
-
-	ret = device_property_read_string_array(chip->parent, "gpio-line-names",
+	ret = fwnode_property_read_string_array(fwnode, "gpio-line-names",
 						NULL, 0);
 	if (ret < 0)
 		return;
 
 	if (ret != gdev->ngpio) {
-		dev_warn(chip->parent,
+		dev_warn(&gdev->dev,
 			 "names %d do not match number of GPIOs %d\n", ret,
 			 gdev->ngpio);
 		return;
@@ -52,10 +49,10 @@ void devprop_gpiochip_set_names(struct gpio_chip *chip)
 	if (!names)
 		return;
 
-	ret = device_property_read_string_array(chip->parent, "gpio-line-names",
+	ret = fwnode_property_read_string_array(fwnode, "gpio-line-names",
 						names, gdev->ngpio);
 	if (ret < 0) {
-		dev_warn(chip->parent, "failed to read GPIO line names\n");
+		dev_warn(&gdev->dev, "failed to read GPIO line names\n");
 		kfree(names);
 		return;
 	}
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index e0d59e61b52fa6aa53e7830ae1ab1c59ceec2453..72a0695d2ac3a3d3217462ff4ea85147921f5dcd 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -493,7 +493,8 @@ int of_gpiochip_add(struct gpio_chip *chip)
 
 	/* If the chip defines names itself, these take precedence */
 	if (!chip->names)
-		devprop_gpiochip_set_names(chip);
+		devprop_gpiochip_set_names(chip,
+					   of_fwnode_handle(chip->of_node));
 
 	of_node_get(chip->of_node);
 
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index aad84a6306c4e5ddbc3364d58add85fa1b1e2583..44332b793718afe1092f5c1a1f42a088b52ab6fb 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -73,7 +73,8 @@ LIST_HEAD(gpio_devices);
 
 static void gpiochip_free_hogs(struct gpio_chip *chip);
 static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
-				struct lock_class_key *key);
+				struct lock_class_key *lock_key,
+				struct lock_class_key *request_key);
 static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
 static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip);
 static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip);
@@ -1100,7 +1101,8 @@ static void gpiochip_setup_devs(void)
 }
 
 int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
-			       struct lock_class_key *key)
+			       struct lock_class_key *lock_key,
+			       struct lock_class_key *request_key)
 {
 	unsigned long	flags;
 	int		status = 0;
@@ -1246,7 +1248,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 	if (status)
 		goto err_remove_from_list;
 
-	status = gpiochip_add_irqchip(chip, key);
+	status = gpiochip_add_irqchip(chip, lock_key, request_key);
 	if (status)
 		goto err_remove_chip;
 
@@ -1632,7 +1634,7 @@ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
 	 * This lock class tells lockdep that GPIO irqs are in a different
 	 * category than their parents, so it won't report false recursion.
 	 */
-	irq_set_lockdep_class(irq, chip->irq.lock_key);
+	irq_set_lockdep_class(irq, chip->irq.lock_key, chip->irq.request_key);
 	irq_set_chip_and_handler(irq, chip->irq.chip, chip->irq.handler);
 	/* Chips that use nested thread handlers have them marked */
 	if (chip->irq.threaded)
@@ -1712,10 +1714,12 @@ static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
 /**
  * gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip
  * @gpiochip: the GPIO chip to add the IRQ chip to
- * @lock_key: lockdep class
+ * @lock_key: lockdep class for IRQ lock
+ * @request_key: lockdep class for IRQ request
  */
 static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
-				struct lock_class_key *lock_key)
+				struct lock_class_key *lock_key,
+				struct lock_class_key *request_key)
 {
 	struct irq_chip *irqchip = gpiochip->irq.chip;
 	const struct irq_domain_ops *ops;
@@ -1753,6 +1757,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
 	gpiochip->to_irq = gpiochip_to_irq;
 	gpiochip->irq.default_type = type;
 	gpiochip->irq.lock_key = lock_key;
+	gpiochip->irq.request_key = request_key;
 
 	if (gpiochip->irq.domain_ops)
 		ops = gpiochip->irq.domain_ops;
@@ -1850,7 +1855,8 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
  * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
  * to have the core avoid setting up any default type in the hardware.
  * @threaded: whether this irqchip uses a nested thread handler
- * @lock_key: lockdep class
+ * @lock_key: lockdep class for IRQ lock
+ * @request_key: lockdep class for IRQ request
  *
  * This function closely associates a certain irqchip with a certain
  * gpiochip, providing an irq domain to translate the local IRQs to
@@ -1872,7 +1878,8 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
 			     irq_flow_handler_t handler,
 			     unsigned int type,
 			     bool threaded,
-			     struct lock_class_key *lock_key)
+			     struct lock_class_key *lock_key,
+			     struct lock_class_key *request_key)
 {
 	struct device_node *of_node;
 
@@ -1913,6 +1920,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
 	gpiochip->irq.default_type = type;
 	gpiochip->to_irq = gpiochip_to_irq;
 	gpiochip->irq.lock_key = lock_key;
+	gpiochip->irq.request_key = request_key;
 	gpiochip->irq.domain = irq_domain_add_simple(of_node,
 					gpiochip->ngpio, first_irq,
 					&gpiochip_domain_ops, gpiochip);
@@ -1940,7 +1948,8 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
 #else /* CONFIG_GPIOLIB_IRQCHIP */
 
 static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
-				       struct lock_class_key *key)
+				       struct lock_class_key *lock_key,
+				       struct lock_class_key *request_key)
 {
 	return 0;
 }
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index af48322839c3d6004ee16a32591f1f434d364fc8..6c44d165213910a259ee94c85731ab4d1ca68431 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -228,7 +228,8 @@ static inline int gpio_chip_hwgpio(const struct gpio_desc *desc)
 	return desc - &desc->gdev->descs[0];
 }
 
-void devprop_gpiochip_set_names(struct gpio_chip *chip);
+void devprop_gpiochip_set_names(struct gpio_chip *chip,
+				const struct fwnode_handle *fwnode);
 
 /* With descriptor prefix */
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index da43813d67a4ad56ddecb79ac0a749afe29abc43..5aeb5f8816f3b9a68666cf57372cddeb12c2b36a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2467,7 +2467,7 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
 				  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
 				  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
 				  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
-				  PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */
+				  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
 				  PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
 				  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
 		amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f71fe6d2ddda795fd2fb914740b75845893c1298..bb5fa895fb6446097580ce229ef23dc473f979af 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2336,7 +2336,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		       const struct dm_connector_state *dm_state)
 {
 	struct drm_display_mode *preferred_mode = NULL;
-	const struct drm_connector *drm_connector;
+	struct drm_connector *drm_connector;
 	struct dc_stream_state *stream = NULL;
 	struct drm_display_mode mode = *drm_mode;
 	bool native_mode_found = false;
@@ -2355,11 +2355,13 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
 	if (!aconnector->dc_sink) {
 		/*
-		 * Exclude MST from creating fake_sink
-		 * TODO: need to enable MST into fake_sink feature
+		 * Create dc_sink when necessary to MST
+		 * Don't apply fake_sink to MST
 		 */
-		if (aconnector->mst_port)
-			goto stream_create_fail;
+		if (aconnector->mst_port) {
+			dm_dp_mst_dc_sink_create(drm_connector);
+			goto mst_dc_sink_create_done;
+		}
 
 		if (create_fake_sink(aconnector))
 			goto stream_create_fail;
@@ -2410,6 +2412,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 stream_create_fail:
 dm_state_null:
 drm_connector_null:
+mst_dc_sink_create_done:
 	return stream;
 }
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 117521c6a6ed26213c60ec5316df64b66eeaba12..0230250a1164bb01b41f3a2b22011960909e14bb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -189,6 +189,8 @@ struct amdgpu_dm_connector {
 	struct mutex hpd_lock;
 
 	bool fake_enable;
+
+	bool mst_connected;
 };
 
 #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index f8efb98b1fa72f86ecbec4c568a653af164c7daa..638c2c2b5cd79069e7312b7d7f23a28b6f5eb3b6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -185,6 +185,42 @@ static int dm_connector_update_modes(struct drm_connector *connector,
 	return ret;
 }
 
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
+{
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+	struct edid *edid;
+	struct dc_sink *dc_sink;
+	struct dc_sink_init_data init_params = {
+			.link = aconnector->dc_link,
+			.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+
+	edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
+
+	if (!edid) {
+		drm_mode_connector_update_edid_property(
+			&aconnector->base,
+			NULL);
+		return;
+	}
+
+	aconnector->edid = edid;
+
+	dc_sink = dc_link_add_remote_sink(
+		aconnector->dc_link,
+		(uint8_t *)aconnector->edid,
+		(aconnector->edid->extensions + 1) * EDID_LENGTH,
+		&init_params);
+
+	dc_sink->priv = aconnector;
+	aconnector->dc_sink = dc_sink;
+
+	amdgpu_dm_add_sink_to_freesync_module(
+			connector, aconnector->edid);
+
+	drm_mode_connector_update_edid_property(
+					&aconnector->base, aconnector->edid);
+}
+
 static int dm_dp_mst_get_modes(struct drm_connector *connector)
 {
 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -311,6 +347,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 			drm_mode_connector_set_path_property(connector, pathprop);
 
 			drm_connector_list_iter_end(&conn_iter);
+			aconnector->mst_connected = true;
 			return &aconnector->base;
 		}
 	}
@@ -363,6 +400,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	 */
 	amdgpu_dm_connector_funcs_reset(connector);
 
+	aconnector->mst_connected = true;
+
 	DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
 			aconnector, connector->base.id, aconnector->mst_port);
 
@@ -394,6 +433,8 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	drm_mode_connector_update_edid_property(
 			&aconnector->base,
 			NULL);
+
+	aconnector->mst_connected = false;
 }
 
 static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -404,10 +445,18 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 	drm_kms_helper_hotplug_event(dev);
 }
 
+static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
+{
+	mutex_lock(&connector->dev->mode_config.mutex);
+	drm_mode_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
+	mutex_unlock(&connector->dev->mode_config.mutex);
+}
+
 static void dm_dp_mst_register_connector(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 
 	if (adev->mode_info.rfbdev)
 		drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
@@ -416,6 +465,8 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
 
 	drm_connector_register(connector);
 
+	if (aconnector->mst_connected)
+		dm_dp_mst_link_status_reset(connector);
 }
 
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 2da851b40042aee9b79eb2c666d45c0f5061fee0..8cf51da26657e29e72062b34aeed7e5d827f9e21 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -31,5 +31,6 @@ struct amdgpu_dm_connector;
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
 				       struct amdgpu_dm_connector *aconnector);
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
 
 #endif
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 3dce35e66b0917d2ec93420063b3477443788302..b142629a105841b603501291800e45b9ade30591 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -900,6 +900,15 @@ bool dcn_validate_bandwidth(
 			v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps;
 			v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c;
 			v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c;
+			/*
+			 * Spreadsheet doesn't handle taps_c is one properly,
+			 * need to force Chroma to always be scaled to pass
+			 * bandwidth validation.
+			 */
+			if (v->override_hta_pschroma[input_idx] == 1)
+				v->override_hta_pschroma[input_idx] = 2;
+			if (v->override_vta_pschroma[input_idx] == 1)
+				v->override_vta_pschroma[input_idx] = 2;
 			v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor;
 		}
 		if (v->is_line_buffer_bpp_fixed == dcn_bw_yes)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index e27ed4a45265290690604b10e6d4df4fbee77514..42a111b9505dcb5190437a381c7dba8fda444719 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1801,7 +1801,7 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
 		link->link_enc->funcs->disable_output(link->link_enc, signal, link);
 }
 
-bool dp_active_dongle_validate_timing(
+static bool dp_active_dongle_validate_timing(
 		const struct dc_crtc_timing *timing,
 		const struct dc_dongle_caps *dongle_caps)
 {
@@ -1833,6 +1833,8 @@ bool dp_active_dongle_validate_timing(
 	/* Check Color Depth and Pixel Clock */
 	if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
 		required_pix_clk /= 2;
+	else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+		required_pix_clk = required_pix_clk * 2 / 3;
 
 	switch (timing->display_color_depth) {
 	case COLOR_DEPTH_666:
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 07ff8d2faf3f4630276d9241092f605274375cda..d844fadcd56f048739e374cb8d534cba10d235b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2866,16 +2866,19 @@ static void dce110_apply_ctx_for_surface(
 		int num_planes,
 		struct dc_state *context)
 {
-	int i, be_idx;
+	int i;
 
 	if (num_planes == 0)
 		return;
 
-	be_idx = -1;
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		if (stream == context->res_ctx.pipe_ctx[i].stream) {
-			be_idx = context->res_ctx.pipe_ctx[i].stream_res.tg->inst;
-			break;
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+		if (stream == pipe_ctx->stream) {
+			if (!pipe_ctx->top_pipe &&
+				(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+				dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
 		}
 	}
 
@@ -2895,9 +2898,22 @@ static void dce110_apply_ctx_for_surface(
 					context->stream_count);
 
 		dce110_program_front_end_for_pipe(dc, pipe_ctx);
+
+		dc->hwss.update_plane_addr(dc, pipe_ctx);
+
 		program_surface_visibility(dc, pipe_ctx);
 
 	}
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+		if ((stream == pipe_ctx->stream) &&
+			(!pipe_ctx->top_pipe) &&
+			(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
+	}
 }
 
 static void dce110_power_down_fe(struct dc *dc, int fe_idx)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 74e7c82bdc76a71080d8e22dd6db90179ffee54e..a9d55d0dd69e009f4a31038c9529c86c05904ec7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -159,11 +159,10 @@ bool dpp_get_optimal_number_of_taps(
 			scl_data->taps.h_taps = 1;
 		if (IDENTITY_RATIO(scl_data->ratios.vert))
 			scl_data->taps.v_taps = 1;
-		/*
-		 * Spreadsheet doesn't handle taps_c is one properly,
-		 * need to force Chroma to always be scaled to pass
-		 * bandwidth validation.
-		 */
+		if (IDENTITY_RATIO(scl_data->ratios.horz_c))
+			scl_data->taps.h_taps_c = 1;
+		if (IDENTITY_RATIO(scl_data->ratios.vert_c))
+			scl_data->taps.v_taps_c = 1;
 	}
 
 	return true;
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index 59849f02e2ad5bb74559ea85fbeb6fc1dd97bde6..1402c0e71b03d18866139056b12f0d5fd84b6afb 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -220,17 +220,6 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
 
 	mutex_lock(&dev->mode_config.idr_mutex);
 
-	/* Insert the new lessee into the tree */
-	id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL);
-	if (id < 0) {
-		error = id;
-		goto out_lessee;
-	}
-
-	lessee->lessee_id = id;
-	lessee->lessor = drm_master_get(lessor);
-	list_add_tail(&lessee->lessee_list, &lessor->lessees);
-
 	idr_for_each_entry(leases, entry, object) {
 		error = 0;
 		if (!idr_find(&dev->mode_config.crtc_idr, object))
@@ -246,6 +235,17 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
 		}
 	}
 
+	/* Insert the new lessee into the tree */
+	id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL);
+	if (id < 0) {
+		error = id;
+		goto out_lessee;
+	}
+
+	lessee->lessee_id = id;
+	lessee->lessor = drm_master_get(lessor);
+	list_add_tail(&lessee->lessee_list, &lessor->lessees);
+
 	/* Move the leases over */
 	lessee->leases = *leases;
 	DRM_DEBUG_LEASE("new lessee %d %p, lessor %d %p\n", lessee->lessee_id, lessee, lessor->lessee_id, lessor);
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 37a93cdffb4ad0e7986a634df4d70ccc3fef286e..2c90519576a3e8b63a4c8361f18672db853ebcec 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -558,11 +558,10 @@ int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
 }
 
 /*
- * setplane_internal - setplane handler for internal callers
+ * __setplane_internal - setplane handler for internal callers
  *
- * Note that we assume an extra reference has already been taken on fb.  If the
- * update fails, this reference will be dropped before return; if it succeeds,
- * the previous framebuffer (if any) will be unreferenced instead.
+ * This function will take a reference on the new fb for the plane
+ * on success.
  *
  * src_{x,y,w,h} are provided in 16.16 fixed point format
  */
@@ -630,14 +629,12 @@ static int __setplane_internal(struct drm_plane *plane,
 	if (!ret) {
 		plane->crtc = crtc;
 		plane->fb = fb;
-		fb = NULL;
+		drm_framebuffer_get(plane->fb);
 	} else {
 		plane->old_fb = NULL;
 	}
 
 out:
-	if (fb)
-		drm_framebuffer_put(fb);
 	if (plane->old_fb)
 		drm_framebuffer_put(plane->old_fb);
 	plane->old_fb = NULL;
@@ -685,6 +682,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
 	struct drm_plane *plane;
 	struct drm_crtc *crtc = NULL;
 	struct drm_framebuffer *fb = NULL;
+	int ret;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
@@ -717,15 +715,16 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
 		}
 	}
 
-	/*
-	 * setplane_internal will take care of deref'ing either the old or new
-	 * framebuffer depending on success.
-	 */
-	return setplane_internal(plane, crtc, fb,
-				 plane_req->crtc_x, plane_req->crtc_y,
-				 plane_req->crtc_w, plane_req->crtc_h,
-				 plane_req->src_x, plane_req->src_y,
-				 plane_req->src_w, plane_req->src_h);
+	ret = setplane_internal(plane, crtc, fb,
+				plane_req->crtc_x, plane_req->crtc_y,
+				plane_req->crtc_w, plane_req->crtc_h,
+				plane_req->src_x, plane_req->src_y,
+				plane_req->src_w, plane_req->src_h);
+
+	if (fb)
+		drm_framebuffer_put(fb);
+
+	return ret;
 }
 
 static int drm_mode_cursor_universal(struct drm_crtc *crtc,
@@ -788,13 +787,12 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
 		src_h = fb->height << 16;
 	}
 
-	/*
-	 * setplane_internal will take care of deref'ing either the old or new
-	 * framebuffer depending on success.
-	 */
 	ret = __setplane_internal(crtc->cursor, crtc, fb,
-				crtc_x, crtc_y, crtc_w, crtc_h,
-				0, 0, src_w, src_h, ctx);
+				  crtc_x, crtc_y, crtc_w, crtc_h,
+				  0, 0, src_w, src_h, ctx);
+
+	if (fb)
+		drm_framebuffer_put(fb);
 
 	/* Update successful; save new cursor position, if necessary */
 	if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index f776fc1cc543abf8e752a5133aaf1ca63fb2d8ff..cb4d09c70fd44647f30b6d10244f25e90db0835f 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -369,40 +369,26 @@ static const struct file_operations drm_syncobj_file_fops = {
 	.release = drm_syncobj_file_release,
 };
 
-static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
-{
-	struct file *file = anon_inode_getfile("syncobj_file",
-					       &drm_syncobj_file_fops,
-					       syncobj, 0);
-	if (IS_ERR(file))
-		return PTR_ERR(file);
-
-	drm_syncobj_get(syncobj);
-	if (cmpxchg(&syncobj->file, NULL, file)) {
-		/* lost the race */
-		fput(file);
-	}
-
-	return 0;
-}
-
 int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
 {
-	int ret;
+	struct file *file;
 	int fd;
 
 	fd = get_unused_fd_flags(O_CLOEXEC);
 	if (fd < 0)
 		return fd;
 
-	if (!syncobj->file) {
-		ret = drm_syncobj_alloc_file(syncobj);
-		if (ret) {
-			put_unused_fd(fd);
-			return ret;
-		}
+	file = anon_inode_getfile("syncobj_file",
+				  &drm_syncobj_file_fops,
+				  syncobj, 0);
+	if (IS_ERR(file)) {
+		put_unused_fd(fd);
+		return PTR_ERR(file);
 	}
-	fd_install(fd, syncobj->file);
+
+	drm_syncobj_get(syncobj);
+	fd_install(fd, file);
+
 	*p_fd = fd;
 	return 0;
 }
@@ -422,31 +408,24 @@ static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
 	return ret;
 }
 
-static struct drm_syncobj *drm_syncobj_fdget(int fd)
-{
-	struct file *file = fget(fd);
-
-	if (!file)
-		return NULL;
-	if (file->f_op != &drm_syncobj_file_fops)
-		goto err;
-
-	return file->private_data;
-err:
-	fput(file);
-	return NULL;
-};
-
 static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
 				    int fd, u32 *handle)
 {
-	struct drm_syncobj *syncobj = drm_syncobj_fdget(fd);
+	struct drm_syncobj *syncobj;
+	struct file *file;
 	int ret;
 
-	if (!syncobj)
+	file = fget(fd);
+	if (!file)
 		return -EINVAL;
 
+	if (file->f_op != &drm_syncobj_file_fops) {
+		fput(file);
+		return -EINVAL;
+	}
+
 	/* take a reference to put in the idr */
+	syncobj = file->private_data;
 	drm_syncobj_get(syncobj);
 
 	idr_preload(GFP_KERNEL);
@@ -455,12 +434,14 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
 	spin_unlock(&file_private->syncobj_table_lock);
 	idr_preload_end();
 
-	if (ret < 0) {
-		fput(syncobj->file);
-		return ret;
-	}
-	*handle = ret;
-	return 0;
+	if (ret > 0) {
+		*handle = ret;
+		ret = 0;
+	} else
+		drm_syncobj_put(syncobj);
+
+	fput(file);
+	return ret;
 }
 
 static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 355120865efd14873726e8eae2e1ec6d6fb31b9f..309f3fa6794a92554c3e7da74429f162090da098 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -266,6 +266,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 	/* Clear host CRT status, so guest couldn't detect this host CRT. */
 	if (IS_BROADWELL(dev_priv))
 		vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
+
+	vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
 }
 
 static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -282,7 +284,6 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
 static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
 				    int type, unsigned int resolution)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
 
 	if (WARN_ON(resolution >= GVT_EDID_NUM))
@@ -308,7 +309,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
 	port->type = type;
 
 	emulate_monitor_status_change(vgpu);
-	vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ad4050f7ab3b6965db1ce1b8d150036354464078..18de6569d04aef46aad4af88edb20f0a94d91b53 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -330,17 +330,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	 * must wait for all rendering to complete to the object (as unbinding
 	 * must anyway), and retire the requests.
 	 */
-	ret = i915_gem_object_wait(obj,
-				   I915_WAIT_INTERRUPTIBLE |
-				   I915_WAIT_LOCKED |
-				   I915_WAIT_ALL,
-				   MAX_SCHEDULE_TIMEOUT,
-				   NULL);
+	ret = i915_gem_object_set_to_cpu_domain(obj, false);
 	if (ret)
 		return ret;
 
-	i915_gem_retire_requests(to_i915(obj->base.dev));
-
 	while ((vma = list_first_entry_or_null(&obj->vma_list,
 					       struct i915_vma,
 					       obj_link))) {
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index e8ca67a129d28da6ef5b9d1de9d8357f7dc02f7e..ac236b88c99ca0fb07c49ec5d9487b1e0b38adfe 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -367,6 +367,7 @@ struct i915_sw_dma_fence_cb {
 	struct dma_fence *dma;
 	struct timer_list timer;
 	struct irq_work work;
+	struct rcu_head rcu;
 };
 
 static void timer_i915_sw_fence_wake(struct timer_list *t)
@@ -406,7 +407,7 @@ static void irq_i915_sw_fence_work(struct irq_work *wrk)
 	del_timer_sync(&cb->timer);
 	dma_fence_put(cb->dma);
 
-	kfree(cb);
+	kfree_rcu(cb, rcu);
 }
 
 int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 5f8b9f1f40f19e84968c18e5fbd229731b392dea..bcbc7abe66935eef46c9799451b939dc6d516639 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -186,7 +186,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
 	struct intel_wait *wait, *n, *first;
 
 	if (!b->irq_armed)
-		return;
+		goto wakeup_signaler;
 
 	/* We only disarm the irq when we are idle (all requests completed),
 	 * so if the bottom-half remains asleep, it missed the request
@@ -208,6 +208,14 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
 	b->waiters = RB_ROOT;
 
 	spin_unlock_irq(&b->rb_lock);
+
+	/*
+	 * The signaling thread may be asleep holding a reference to a request,
+	 * that had its signaling cancelled prior to being preempted. We need
+	 * to kick the signaler, just in case, to release any such reference.
+	 */
+wakeup_signaler:
+	wake_up_process(b->signaler);
 }
 
 static bool use_fake_irq(const struct intel_breadcrumbs *b)
@@ -651,23 +659,15 @@ static int intel_breadcrumbs_signaler(void *arg)
 		}
 
 		if (unlikely(do_schedule)) {
-			DEFINE_WAIT(exec);
-
 			if (kthread_should_park())
 				kthread_parkme();
 
-			if (kthread_should_stop()) {
-				GEM_BUG_ON(request);
+			if (unlikely(kthread_should_stop())) {
+				i915_gem_request_put(request);
 				break;
 			}
 
-			if (request)
-				add_wait_queue(&request->execute, &exec);
-
 			schedule();
-
-			if (request)
-				remove_wait_queue(&request->execute, &exec);
 		}
 		i915_gem_request_put(request);
 	} while (1);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index e0843bb991699d0c81ec8242d1809a3e26527af8..58a3755544b292dfdd3c7e089f9179c51861d76d 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2128,6 +2128,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
 	if (WARN_ON(!pll))
 		return;
 
+	 mutex_lock(&dev_priv->dpll_lock);
+
 	if (IS_CANNONLAKE(dev_priv)) {
 		/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
 		val = I915_READ(DPCLKA_CFGCR0);
@@ -2157,6 +2159,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
 	} else if (INTEL_INFO(dev_priv)->gen < 9) {
 		I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
 	}
+
+	mutex_unlock(&dev_priv->dpll_lock);
 }
 
 static void intel_ddi_clk_disable(struct intel_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e8ccf89cb17b6843b878ec0ef0641055d68e1cf8..30cf273d57aa5a7b91b77928a9a971cba478b8c0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9944,11 +9944,10 @@ found:
 	}
 
 	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
+	drm_framebuffer_put(fb);
 	if (ret)
 		goto fail;
 
-	drm_framebuffer_put(fb);
-
 	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
 	if (ret)
 		goto fail;
@@ -13195,7 +13194,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
 	primary->check_plane = intel_check_primary_plane;
 
-	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 10) {
 		intel_primary_formats = skl_primary_formats;
 		num_formats = ARRAY_SIZE(skl_primary_formats);
 		modifiers = skl_format_modifiers_ccs;
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 3bf65288ffffd51719d0c4e8ce934ccd2d2f59e3..5809b29044fc573401f6116bb45762fc07e71596 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -193,7 +193,7 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
 		};
 
 		if (!pci_dev_present(atom_hdaudio_ids)) {
-			DRM_INFO("%s\n", "HDaudio controller not detected, using LPE audio instead\n");
+			DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
 			lpe_present = true;
 		}
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2615912430cc97098f0fe806e95e5e40c1ee96f7..435ff8662cfa823a56f5d84a8fe66d4bc8929230 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -224,7 +224,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 		/* Determine if we can get a cache-coherent map, forcing
 		 * uncached mapping if we can't.
 		 */
-		if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
+		if (!nouveau_drm_use_coherent_gpu_mapping(drm))
 			nvbo->force_coherent = true;
 	}
 
@@ -262,7 +262,8 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
 		    (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
 			continue;
-		if ((flags & TTM_PL_FLAG_TT  ) && !vmm->page[i].host)
+		if ((flags & TTM_PL_FLAG_TT) &&
+		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
 			continue;
 
 		/* Select this page size if it's the first that supports
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8d4a5be3b913016c410c8226ad5b05b0bf75299b..56fe261b62683a8690ac8f3439426ca1e4c269ef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -152,9 +152,9 @@ nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence,
 	work->cli = cli;
 	mutex_lock(&cli->lock);
 	list_add_tail(&work->head, &cli->worker);
-	mutex_unlock(&cli->lock);
 	if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence))
 		nouveau_cli_work_fence(fence, &work->cb);
+	mutex_unlock(&cli->lock);
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 3331e82ae9e7130b18f4a6f307cc284519f873d3..96f6bd8aee5d3a248d76c683b6146ebb8ef673c7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -157,8 +157,8 @@ struct nouveau_drm {
 		struct nvif_object copy;
 		int mtrr;
 		int type_vram;
-		int type_host;
-		int type_ncoh;
+		int type_host[2];
+		int type_ncoh[2];
 	} ttm;
 
 	/* GEM interface support */
@@ -217,6 +217,13 @@ nouveau_drm(struct drm_device *dev)
 	return dev->dev_private;
 }
 
+static inline bool
+nouveau_drm_use_coherent_gpu_mapping(struct nouveau_drm *drm)
+{
+	struct nvif_mmu *mmu = &drm->client.mmu;
+	return !(mmu->type[drm->ttm.type_host[0]].type & NVIF_MEM_UNCACHED);
+}
+
 int nouveau_pmops_suspend(struct device *);
 int nouveau_pmops_resume(struct device *);
 bool nouveau_pmops_runtime(void);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index c533d8e04afc0f1fc4708d85e069323291c428c3..be7357bf2246e6ae326c9b6750c2c183cb0974d9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -429,7 +429,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
 	drm_fb_helper_unregister_fbi(&fbcon->helper);
 	drm_fb_helper_fini(&fbcon->helper);
 
-	if (nouveau_fb->nvbo) {
+	if (nouveau_fb && nouveau_fb->nvbo) {
 		nouveau_vma_del(&nouveau_fb->vma);
 		nouveau_bo_unmap(nouveau_fb->nvbo);
 		nouveau_bo_unpin(nouveau_fb->nvbo);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 589a9621db763f98454485081a3f80e2324e717c..c002f896850739b343624247e7d52d94e34bf99d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -103,10 +103,10 @@ nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
 	u8 type;
 	int ret;
 
-	if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
-		type = drm->ttm.type_ncoh;
+	if (!nouveau_drm_use_coherent_gpu_mapping(drm))
+		type = drm->ttm.type_ncoh[!!mem->kind];
 	else
-		type = drm->ttm.type_host;
+		type = drm->ttm.type_host[0];
 
 	if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))
 		mem->comp = mem->kind = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 08b974b3048279813e2d67ad0d5b0055e68998c2..dff51a0ee0281e8f5924ffc0135d8b4baf8542f9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -235,27 +235,46 @@ nouveau_ttm_global_release(struct nouveau_drm *drm)
 	drm->ttm.mem_global_ref.release = NULL;
 }
 
-int
-nouveau_ttm_init(struct nouveau_drm *drm)
+static int
+nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
 {
-	struct nvkm_device *device = nvxx_device(&drm->client.device);
-	struct nvkm_pci *pci = device->pci;
 	struct nvif_mmu *mmu = &drm->client.mmu;
-	struct drm_device *dev = drm->dev;
-	int typei, ret;
+	int typei;
 
 	typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
-						   NVIF_MEM_COHERENT);
+					    kind | NVIF_MEM_COHERENT);
 	if (typei < 0)
 		return -ENOSYS;
 
-	drm->ttm.type_host = typei;
+	drm->ttm.type_host[!!kind] = typei;
 
-	typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE);
+	typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
 	if (typei < 0)
 		return -ENOSYS;
 
-	drm->ttm.type_ncoh = typei;
+	drm->ttm.type_ncoh[!!kind] = typei;
+	return 0;
+}
+
+int
+nouveau_ttm_init(struct nouveau_drm *drm)
+{
+	struct nvkm_device *device = nvxx_device(&drm->client.device);
+	struct nvkm_pci *pci = device->pci;
+	struct nvif_mmu *mmu = &drm->client.mmu;
+	struct drm_device *dev = drm->dev;
+	int typei, ret;
+
+	ret = nouveau_ttm_init_host(drm, 0);
+	if (ret)
+		return ret;
+
+	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+	    drm->client.device.info.chipset != 0x50) {
+		ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
+		if (ret)
+			return ret;
+	}
 
 	if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
 	    drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c
index 9e2628dd8e4d6734c2d7c5d073012bbb95b4fa4c..f5371d96b003c23cac9e1f34cf3deca3b54b06a6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c
@@ -67,8 +67,8 @@ nouveau_vma_del(struct nouveau_vma **pvma)
 			nvif_vmm_put(&vma->vmm->vmm, &tmp);
 		}
 		list_del(&vma->head);
-		*pvma = NULL;
 		kfree(*pvma);
+		*pvma = NULL;
 	}
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index e146436156985a534fa14e0829db4560d6eb1459..00eeaaffeae565a04044fc55e52990eb71d1063b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2369,7 +2369,7 @@ nv13b_chipset = {
 	.imem = gk20a_instmem_new,
 	.ltc = gp100_ltc_new,
 	.mc = gp10b_mc_new,
-	.mmu = gf100_mmu_new,
+	.mmu = gp10b_mmu_new,
 	.secboot = gp10b_secboot_new,
 	.pmu = gm20b_pmu_new,
 	.timer = gk20a_timer_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index 972370ed36f090d0c0323253b79735edd355db07..7c7efa4ea0d0edb391a27db2c6e99179799070f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -36,6 +36,7 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 			if (data) {
 				*ver = nvbios_rd08(bios, data + 0x00);
 				switch (*ver) {
+				case 0x20:
 				case 0x21:
 				case 0x30:
 				case 0x40:
@@ -63,6 +64,7 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx,
 	if (data && idx < *cnt) {
 		u16 outp = nvbios_rd16(bios, data + *hdr + idx * *len);
 		switch (*ver * !!outp) {
+		case 0x20:
 		case 0x21:
 		case 0x30:
 			*hdr = nvbios_rd08(bios, data + 0x04);
@@ -96,12 +98,16 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
 		info->type = nvbios_rd16(bios, data + 0x00);
 		info->mask = nvbios_rd16(bios, data + 0x02);
 		switch (*ver) {
+		case 0x20:
+			info->mask |= 0x00c0; /* match any link */
+			/* fall-through */
 		case 0x21:
 		case 0x30:
 			info->flags     = nvbios_rd08(bios, data + 0x05);
 			info->script[0] = nvbios_rd16(bios, data + 0x06);
 			info->script[1] = nvbios_rd16(bios, data + 0x08);
-			info->lnkcmp    = nvbios_rd16(bios, data + 0x0a);
+			if (*len >= 0x0c)
+				info->lnkcmp    = nvbios_rd16(bios, data + 0x0a);
 			if (*len >= 0x0f) {
 				info->script[2] = nvbios_rd16(bios, data + 0x0c);
 				info->script[3] = nvbios_rd16(bios, data + 0x0e);
@@ -170,6 +176,7 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
 	memset(info, 0x00, sizeof(*info));
 	if (data) {
 		switch (*ver) {
+		case 0x20:
 		case 0x21:
 			info->dc    = nvbios_rd08(bios, data + 0x02);
 			info->pe    = nvbios_rd08(bios, data + 0x03);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 1ba7289684aa2116b6fcc4d05869f0d2b8322a39..db48a1daca0c7a3d786332ce25435839fcc10760 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -249,7 +249,7 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
 			iobj->base.memory.ptrs = &nv50_instobj_fast;
 		else
 			iobj->base.memory.ptrs = &nv50_instobj_slow;
-		refcount_inc(&iobj->maps);
+		refcount_set(&iobj->maps, 1);
 	}
 
 	mutex_unlock(&imem->subdev.mutex);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index b1b1f3626b96298fcdb76f1819fd7b97801d5b37..deb96de54b0030244ec88014bce526119c3fae91 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -136,6 +136,13 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
 		return ret;
 
 	pci->irq = pdev->irq;
+
+	/* Ensure MSI interrupts are armed, for the case where there are
+	 * already interrupts pending (for whatever reason) at load time.
+	 */
+	if (pci->msi)
+		pci->func->msi_rearm(pci);
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index dda904ec0534cd9d84d3967b94bf5fa4f444df9e..500b6fb3e0284d2fdfc71265a64f0d5b51fe4f99 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -175,11 +175,31 @@ static void sun4i_hdmi_mode_set(struct drm_encoder *encoder,
 	writel(val, hdmi->base + SUN4I_HDMI_VID_TIMING_POL_REG);
 }
 
+static enum drm_mode_status sun4i_hdmi_mode_valid(struct drm_encoder *encoder,
+					const struct drm_display_mode *mode)
+{
+	struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
+	unsigned long rate = mode->clock * 1000;
+	unsigned long diff = rate / 200; /* +-0.5% allowed by HDMI spec */
+	long rounded_rate;
+
+	/* 165 MHz is the typical max pixelclock frequency for HDMI <= 1.2 */
+	if (rate > 165000000)
+		return MODE_CLOCK_HIGH;
+	rounded_rate = clk_round_rate(hdmi->tmds_clk, rate);
+	if (rounded_rate > 0 &&
+	    max_t(unsigned long, rounded_rate, rate) -
+	    min_t(unsigned long, rounded_rate, rate) < diff)
+		return MODE_OK;
+	return MODE_NOCLOCK;
+}
+
 static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = {
 	.atomic_check	= sun4i_hdmi_atomic_check,
 	.disable	= sun4i_hdmi_disable,
 	.enable		= sun4i_hdmi_enable,
 	.mode_set	= sun4i_hdmi_mode_set,
+	.mode_valid	= sun4i_hdmi_mode_valid,
 };
 
 static const struct drm_encoder_funcs sun4i_hdmi_funcs = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index e122f5b2a395583cc14302a9bc4166fbba671071..f4284b51bdca99a04e8eda109a4d67bf5c9fac74 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -724,12 +724,12 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
 	if (IS_ERR(tcon->crtc)) {
 		dev_err(dev, "Couldn't create our CRTC\n");
 		ret = PTR_ERR(tcon->crtc);
-		goto err_free_clocks;
+		goto err_free_dotclock;
 	}
 
 	ret = sun4i_rgb_init(drm, tcon);
 	if (ret < 0)
-		goto err_free_clocks;
+		goto err_free_dotclock;
 
 	if (tcon->quirks->needs_de_be_mux) {
 		/*
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 44343a2bf55c65458a196b5968b0c494f1c569b0..b5ba6441489f6e4f28f6e71129dfede3361bd262 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -455,6 +455,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		freed += (nr_free_pool - shrink_pages) << pool->order;
 		if (freed >= sc->nr_to_scan)
 			break;
+		shrink_pages <<= pool->order;
 	}
 	mutex_unlock(&lock);
 	return freed;
@@ -543,7 +544,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 	int r = 0;
 	unsigned i, j, cpages;
 	unsigned npages = 1 << order;
-	unsigned max_cpages = min(count, (unsigned)NUM_PAGES_TO_ALLOC);
+	unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
 
 	/* allocate array for page caching change */
 	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f3fcb836a1f9edd0af5b1f31362987d22eff11b2..0c3f608131cff48fdbec637da27135d970534807 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -551,7 +551,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
 		ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
 		break;
 	default:
-		hid_err(parser->device, "unknown main item tag 0x%x\n", item->tag);
+		hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
 		ret = 0;
 	}
 
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 68cdc962265b1051d7e399e0c6a181a4426673ed..271f31461da427d93459632b096c578d71f3ee44 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -696,8 +696,16 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
 					      (u8 *)&word, 2);
 		break;
 	case I2C_SMBUS_I2C_BLOCK_DATA:
-		size = I2C_SMBUS_BLOCK_DATA;
-		/* fallthrough */
+		if (read_write == I2C_SMBUS_READ) {
+			read_length = data->block[0];
+			count = cp2112_write_read_req(buf, addr, read_length,
+						      command, NULL, 0);
+		} else {
+			count = cp2112_write_req(buf, addr, command,
+						 data->block + 1,
+						 data->block[0]);
+		}
+		break;
 	case I2C_SMBUS_BLOCK_DATA:
 		if (I2C_SMBUS_READ == read_write) {
 			count = cp2112_write_read_req(buf, addr,
@@ -785,6 +793,9 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
 	case I2C_SMBUS_WORD_DATA:
 		data->word = le16_to_cpup((__le16 *)buf);
 		break;
+	case I2C_SMBUS_I2C_BLOCK_DATA:
+		memcpy(data->block + 1, buf, read_length);
+		break;
 	case I2C_SMBUS_BLOCK_DATA:
 		if (read_length > I2C_SMBUS_BLOCK_MAX) {
 			ret = -EPROTO;
diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c
index 9325545fc3ae1cac4e3919c1120af6841590d4f4..edc0f64bb584806f080c1b6f682c53a20acc8466 100644
--- a/drivers/hid/hid-holtekff.c
+++ b/drivers/hid/hid-holtekff.c
@@ -32,10 +32,6 @@
 
 #ifdef CONFIG_HOLTEK_FF
 
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
-MODULE_DESCRIPTION("Force feedback support for Holtek On Line Grip based devices");
-
 /*
  * These commands and parameters are currently known:
  *
@@ -223,3 +219,7 @@ static struct hid_driver holtek_driver = {
 	.probe = holtek_probe,
 };
 module_hid_driver(holtek_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
+MODULE_DESCRIPTION("Force feedback support for Holtek On Line Grip based devices");
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 76ed9a216f101e90ddaa3bb38a1e0e7175ea81d4..610223f0e94530635ac94ccf40b9f582c57d6d62 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1378,6 +1378,8 @@ void vmbus_device_unregister(struct hv_device *device_obj)
 	pr_debug("child device %s unregistered\n",
 		dev_name(&device_obj->device));
 
+	kset_unregister(device_obj->channels_kset);
+
 	/*
 	 * Kick off the process of unregistering the device.
 	 * This will call vmbus_remove() and eventually vmbus_device_release()
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index c9790e2c344016e91c26d02f4a69e6a15035b62a..af5123042990281c694311da2819107dd8336afb 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -143,6 +143,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
 				    struct hwmon_device *hwdev, int index)
 {
 	struct hwmon_thermal_data *tdata;
+	struct thermal_zone_device *tzd;
 
 	tdata = devm_kzalloc(dev, sizeof(*tdata), GFP_KERNEL);
 	if (!tdata)
@@ -151,8 +152,14 @@ static int hwmon_thermal_add_sensor(struct device *dev,
 	tdata->hwdev = hwdev;
 	tdata->index = index;
 
-	devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
-					     &hwmon_thermal_ops);
+	tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
+						   &hwmon_thermal_ops);
+	/*
+	 * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
+	 * so ignore that error but forward any other error.
+	 */
+	if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV))
+		return PTR_ERR(tzd);
 
 	return 0;
 }
@@ -621,14 +628,20 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
 				if (!chip->ops->is_visible(drvdata, hwmon_temp,
 							   hwmon_temp_input, j))
 					continue;
-				if (info[i]->config[j] & HWMON_T_INPUT)
-					hwmon_thermal_add_sensor(dev, hwdev, j);
+				if (info[i]->config[j] & HWMON_T_INPUT) {
+					err = hwmon_thermal_add_sensor(dev,
+								hwdev, j);
+					if (err)
+						goto free_device;
+				}
 			}
 		}
 	}
 
 	return hdev;
 
+free_device:
+	device_unregister(hdev);
 free_hwmon:
 	kfree(hwdev);
 ida_remove:
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index feafdb961c485c61e3842d6a946d83b1bf7176b8..59b2f96d986aa2b86491d29b8d7c3e25f244ba6f 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -386,6 +386,9 @@ int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
 	if (ret)
 		return ret;
 
+	if (!qp->qp_sec)
+		return 0;
+
 	mutex_lock(&real_qp->qp_sec->mutex);
 	ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
 					  qp->qp_sec);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index d0202bb176a4a6a826b27f2b4327691e334ad4ea..840b24096690ddcdef6ce894c5ee21b6285bf238 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2074,8 +2074,8 @@ int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
 		return -EOPNOTSUPP;
 
 	if (ucore->inlen > sizeof(cmd)) {
-		if (ib_is_udata_cleared(ucore, sizeof(cmd),
-					ucore->inlen - sizeof(cmd)))
+		if (!ib_is_udata_cleared(ucore, sizeof(cmd),
+					 ucore->inlen - sizeof(cmd)))
 			return -EOPNOTSUPP;
 	}
 
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 3fb8fb6cc824ef09f9c9c229e5a99a3e4801a65e..e36d27ed4daae3d46cb88b3ab4f747c34e6abb49 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1438,7 +1438,8 @@ int ib_close_qp(struct ib_qp *qp)
 	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
 
 	atomic_dec(&real_qp->usecnt);
-	ib_close_shared_qp_security(qp->qp_sec);
+	if (qp->qp_sec)
+		ib_close_shared_qp_security(qp->qp_sec);
 	kfree(qp);
 
 	return 0;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index b7bfc536e00fd8c7b241c0f56539d1394d235acf..6f2b26126c64a4503b6a3bf8b8c3991b65b65012 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -395,7 +395,7 @@ next_cqe:
 
 static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
 {
-	if (CQE_OPCODE(cqe) == C4IW_DRAIN_OPCODE) {
+	if (DRAIN_CQE(cqe)) {
 		WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
 		return 0;
 	}
@@ -494,7 +494,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 	/*
 	 * Special cqe for drain WR completions...
 	 */
-	if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+	if (DRAIN_CQE(hw_cqe)) {
 		*cookie = CQE_DRAIN_COOKIE(hw_cqe);
 		*cqe = *hw_cqe;
 		goto skip_cqe;
@@ -571,10 +571,10 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 			ret = -EAGAIN;
 			goto skip_cqe;
 		}
-		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
+		if (unlikely(!CQE_STATUS(hw_cqe) &&
+			     CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
 			t4_set_wq_in_error(wq);
-			hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
-			goto proc_cqe;
+			hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
 		}
 		goto proc_cqe;
 	}
@@ -748,9 +748,6 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 				c4iw_invalidate_mr(qhp->rhp,
 						   CQE_WRID_FR_STAG(&cqe));
 			break;
-		case C4IW_DRAIN_OPCODE:
-			wc->opcode = IB_WC_SEND;
-			break;
 		default:
 			pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
 			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 470f97a79ebb7f90e649179ab34b30ad0c089df7..65dd3726ca024db4e0fabff5c4527c676757065e 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -693,8 +693,6 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
 	return IB_QPS_ERR;
 }
 
-#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
-
 static inline u32 c4iw_ib_to_tpt_access(int a)
 {
 	return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 38bddd02a9437470e0f3ed98a7e55afbc8cc7384..d5c92fc520d6471f5c1f41d77dd15b280a8a651b 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -790,21 +790,57 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
 	return 0;
 }
 
-static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+static int ib_to_fw_opcode(int ib_opcode)
+{
+	int opcode;
+
+	switch (ib_opcode) {
+	case IB_WR_SEND_WITH_INV:
+		opcode = FW_RI_SEND_WITH_INV;
+		break;
+	case IB_WR_SEND:
+		opcode = FW_RI_SEND;
+		break;
+	case IB_WR_RDMA_WRITE:
+		opcode = FW_RI_RDMA_WRITE;
+		break;
+	case IB_WR_RDMA_READ:
+	case IB_WR_RDMA_READ_WITH_INV:
+		opcode = FW_RI_READ_REQ;
+		break;
+	case IB_WR_REG_MR:
+		opcode = FW_RI_FAST_REGISTER;
+		break;
+	case IB_WR_LOCAL_INV:
+		opcode = FW_RI_LOCAL_INV;
+		break;
+	default:
+		opcode = -EINVAL;
+	}
+	return opcode;
+}
+
+static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
 {
 	struct t4_cqe cqe = {};
 	struct c4iw_cq *schp;
 	unsigned long flag;
 	struct t4_cq *cq;
+	int opcode;
 
 	schp = to_c4iw_cq(qhp->ibqp.send_cq);
 	cq = &schp->cq;
 
+	opcode = ib_to_fw_opcode(wr->opcode);
+	if (opcode < 0)
+		return opcode;
+
 	cqe.u.drain_cookie = wr->wr_id;
 	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
-				 CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+				 CQE_OPCODE_V(opcode) |
 				 CQE_TYPE_V(1) |
 				 CQE_SWCQE_V(1) |
+				 CQE_DRAIN_V(1) |
 				 CQE_QPID_V(qhp->wq.sq.qid));
 
 	spin_lock_irqsave(&schp->lock, flag);
@@ -819,6 +855,23 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
 					   schp->ibcq.cq_context);
 		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
 	}
+	return 0;
+}
+
+static int complete_sq_drain_wrs(struct c4iw_qp *qhp, struct ib_send_wr *wr,
+				struct ib_send_wr **bad_wr)
+{
+	int ret = 0;
+
+	while (wr) {
+		ret = complete_sq_drain_wr(qhp, wr);
+		if (ret) {
+			*bad_wr = wr;
+			break;
+		}
+		wr = wr->next;
+	}
+	return ret;
 }
 
 static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
@@ -833,9 +886,10 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
 
 	cqe.u.drain_cookie = wr->wr_id;
 	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
-				 CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+				 CQE_OPCODE_V(FW_RI_SEND) |
 				 CQE_TYPE_V(0) |
 				 CQE_SWCQE_V(1) |
+				 CQE_DRAIN_V(1) |
 				 CQE_QPID_V(qhp->wq.sq.qid));
 
 	spin_lock_irqsave(&rchp->lock, flag);
@@ -852,6 +906,14 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
 	}
 }
 
+static void complete_rq_drain_wrs(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+	while (wr) {
+		complete_rq_drain_wr(qhp, wr);
+		wr = wr->next;
+	}
+}
+
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		   struct ib_send_wr **bad_wr)
 {
@@ -875,7 +937,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	 */
 	if (qhp->wq.flushed) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
-		complete_sq_drain_wr(qhp, wr);
+		err = complete_sq_drain_wrs(qhp, wr, bad_wr);
 		return err;
 	}
 	num_wrs = t4_sq_avail(&qhp->wq);
@@ -1023,7 +1085,7 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 	 */
 	if (qhp->wq.flushed) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
-		complete_rq_drain_wr(qhp, wr);
+		complete_rq_drain_wrs(qhp, wr);
 		return err;
 	}
 	num_wrs = t4_rq_avail(&qhp->wq);
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index e9ea94268d51545b07400ed9cba095d8461fb2a1..79e8ee12c391cf6d800911d4b2f0e16063c6de39 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -197,6 +197,11 @@ struct t4_cqe {
 #define CQE_SWCQE_G(x)    ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M)
 #define CQE_SWCQE_V(x)	  ((x)<<CQE_SWCQE_S)
 
+#define CQE_DRAIN_S       10
+#define CQE_DRAIN_M       0x1
+#define CQE_DRAIN_G(x)    ((((x) >> CQE_DRAIN_S)) & CQE_DRAIN_M)
+#define CQE_DRAIN_V(x)	  ((x)<<CQE_DRAIN_S)
+
 #define CQE_STATUS_S      5
 #define CQE_STATUS_M      0x1F
 #define CQE_STATUS_G(x)   ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M)
@@ -213,6 +218,7 @@ struct t4_cqe {
 #define CQE_OPCODE_V(x)   ((x)<<CQE_OPCODE_S)
 
 #define SW_CQE(x)         (CQE_SWCQE_G(be32_to_cpu((x)->header)))
+#define DRAIN_CQE(x)      (CQE_DRAIN_G(be32_to_cpu((x)->header)))
 #define CQE_QPID(x)       (CQE_QPID_G(be32_to_cpu((x)->header)))
 #define CQE_TYPE(x)       (CQE_TYPE_G(be32_to_cpu((x)->header)))
 #define SQ_TYPE(x)	  (CQE_TYPE((x)))
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 4a9b4d7efe6362f31457672882e882af3bfdfc46..8ce9118d4a7fbac52494de9e5b489dc44365706b 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1131,7 +1131,6 @@ struct hfi1_devdata {
 	u16 pcie_lnkctl;
 	u16 pcie_devctl2;
 	u32 pci_msix0;
-	u32 pci_lnkctl3;
 	u32 pci_tph2;
 
 	/*
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 09e50fd2a08f07bf7b2d42d3d4b4a1a00644b2ce..8c7e7a60b71584d5e587f6ef664cc773ab991991 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -411,15 +411,12 @@ int restore_pci_variables(struct hfi1_devdata *dd)
 	if (ret)
 		goto error;
 
-	ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
-				     dd->pci_lnkctl3);
-	if (ret)
-		goto error;
-
-	ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2);
-	if (ret)
-		goto error;
-
+	if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
+		ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2,
+					     dd->pci_tph2);
+		if (ret)
+			goto error;
+	}
 	return 0;
 
 error:
@@ -469,15 +466,12 @@ int save_pci_variables(struct hfi1_devdata *dd)
 	if (ret)
 		goto error;
 
-	ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1,
-				    &dd->pci_lnkctl3);
-	if (ret)
-		goto error;
-
-	ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, &dd->pci_tph2);
-	if (ret)
-		goto error;
-
+	if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) {
+		ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2,
+					    &dd->pci_tph2);
+		if (ret)
+			goto error;
+	}
 	return 0;
 
 error:
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 470995fa38d23ee9c5917f9986c53c582db8d542..6f6712f87a730de3cc8c05aeb73f50de73c07487 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -47,17 +47,6 @@ int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
 	return err;
 }
 
-int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
-				bool reset, void *out, int out_size)
-{
-	u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { };
-
-	MLX5_SET(query_cong_statistics_in, in, opcode,
-		 MLX5_CMD_OP_QUERY_CONG_STATISTICS);
-	MLX5_SET(query_cong_statistics_in, in, clear, reset);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
-}
-
 int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
 			       void *out, int out_size)
 {
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index af4c24596274deabd07ea0d95b719f4fd5d990e1..78ffded7cc2c59e2744d6279d96adc6e10cd1348 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -37,8 +37,6 @@
 #include <linux/mlx5/driver.h>
 
 int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
-int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
-				bool reset, void *out, int out_size);
 int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
 			       void *out, int out_size);
 int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 543d0a4c8bf36eb43af6d9047c7eaa6cd75d9638..8ac50de2b2421ea66cb23eff88c09f8ecf3a0f94 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1463,6 +1463,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	}
 
 	INIT_LIST_HEAD(&context->vma_private_list);
+	mutex_init(&context->vma_private_list_mutex);
 	INIT_LIST_HEAD(&context->db_page_list);
 	mutex_init(&context->db_page_mutex);
 
@@ -1624,7 +1625,9 @@ static void  mlx5_ib_vma_close(struct vm_area_struct *area)
 	 * mlx5_ib_disassociate_ucontext().
 	 */
 	mlx5_ib_vma_priv_data->vma = NULL;
+	mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
 	list_del(&mlx5_ib_vma_priv_data->list);
+	mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
 	kfree(mlx5_ib_vma_priv_data);
 }
 
@@ -1644,10 +1647,13 @@ static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
 		return -ENOMEM;
 
 	vma_prv->vma = vma;
+	vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex;
 	vma->vm_private_data = vma_prv;
 	vma->vm_ops =  &mlx5_ib_vm_ops;
 
+	mutex_lock(&ctx->vma_private_list_mutex);
 	list_add(&vma_prv->list, vma_head);
+	mutex_unlock(&ctx->vma_private_list_mutex);
 
 	return 0;
 }
@@ -1690,6 +1696,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
 	 * mlx5_ib_vma_close.
 	 */
 	down_write(&owning_mm->mmap_sem);
+	mutex_lock(&context->vma_private_list_mutex);
 	list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
 				 list) {
 		vma = vma_private->vma;
@@ -1704,6 +1711,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
 		list_del(&vma_private->list);
 		kfree(vma_private);
 	}
+	mutex_unlock(&context->vma_private_list_mutex);
 	up_write(&owning_mm->mmap_sem);
 	mmput(owning_mm);
 	put_task_struct(owning_process);
@@ -3737,34 +3745,6 @@ free:
 	return ret;
 }
 
-static int mlx5_ib_query_cong_counters(struct mlx5_ib_dev *dev,
-				       struct mlx5_ib_port *port,
-				       struct rdma_hw_stats *stats)
-{
-	int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
-	void *out;
-	int ret, i;
-	int offset = port->cnts.num_q_counters;
-
-	out = kvzalloc(outlen, GFP_KERNEL);
-	if (!out)
-		return -ENOMEM;
-
-	ret = mlx5_cmd_query_cong_counter(dev->mdev, false, out, outlen);
-	if (ret)
-		goto free;
-
-	for (i = 0; i < port->cnts.num_cong_counters; i++) {
-		stats->value[i + offset] =
-			be64_to_cpup((__be64 *)(out +
-				     port->cnts.offsets[i + offset]));
-	}
-
-free:
-	kvfree(out);
-	return ret;
-}
-
 static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
 				struct rdma_hw_stats *stats,
 				u8 port_num, int index)
@@ -3782,7 +3762,12 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
 	num_counters = port->cnts.num_q_counters;
 
 	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
-		ret = mlx5_ib_query_cong_counters(dev, port, stats);
+		ret = mlx5_lag_query_cong_counters(dev->mdev,
+						   stats->value +
+						   port->cnts.num_q_counters,
+						   port->cnts.num_cong_counters,
+						   port->cnts.offsets +
+						   port->cnts.num_q_counters);
 		if (ret)
 			return ret;
 		num_counters += port->cnts.num_cong_counters;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 6dd8cac78de2c44854f1d5006dd058d2bc81e170..2c5f3533bbc9cbf08ce4fc816ecb86bc5ff04612 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -115,6 +115,8 @@ enum {
 struct mlx5_ib_vma_private_data {
 	struct list_head list;
 	struct vm_area_struct *vma;
+	/* protect vma_private_list add/del */
+	struct mutex *vma_private_list_mutex;
 };
 
 struct mlx5_ib_ucontext {
@@ -129,6 +131,8 @@ struct mlx5_ib_ucontext {
 	/* Transport Domain number */
 	u32			tdn;
 	struct list_head	vma_private_list;
+	/* protect vma_private_list add/del */
+	struct mutex		vma_private_list_mutex;
 
 	unsigned long		upd_xlt_page;
 	/* protect ODP/KSM */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index ee0ee1f9994b4fae933d8590b72ba138d29e176d..d109fe8290a70964d71e44ceb1408215f24d2058 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1637,6 +1637,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 	MLX5_SET(mkc, mkc, access_mode, mr->access_mode);
 	MLX5_SET(mkc, mkc, umr_en, 1);
 
+	mr->ibmr.device = pd->device;
 	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
 	if (err)
 		goto err_destroy_psv;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 63bc2efc34eb57e37f364e5b73e5a157e6040e47..4f7bd3b6a3152937a8ec2fa4de8637e3d9391a9a 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -94,7 +94,7 @@ struct pvrdma_cq {
 	u32 cq_handle;
 	bool is_kernel;
 	atomic_t refcnt;
-	wait_queue_head_t wait;
+	struct completion free;
 };
 
 struct pvrdma_id_table {
@@ -175,7 +175,7 @@ struct pvrdma_srq {
 	u32 srq_handle;
 	int npages;
 	refcount_t refcnt;
-	wait_queue_head_t wait;
+	struct completion free;
 };
 
 struct pvrdma_qp {
@@ -197,7 +197,7 @@ struct pvrdma_qp {
 	bool is_kernel;
 	struct mutex mutex; /* QP state mutex. */
 	atomic_t refcnt;
-	wait_queue_head_t wait;
+	struct completion free;
 };
 
 struct pvrdma_dev {
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 3562c0c30492d07a7d1eddacb033ef769a1f9264..e529622cefad6a501492dc165458b738636842b3 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -179,7 +179,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
 		pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
 
 	atomic_set(&cq->refcnt, 1);
-	init_waitqueue_head(&cq->wait);
+	init_completion(&cq->free);
 	spin_lock_init(&cq->cq_lock);
 
 	memset(cmd, 0, sizeof(*cmd));
@@ -230,8 +230,9 @@ err_cq:
 
 static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
 {
-	atomic_dec(&cq->refcnt);
-	wait_event(cq->wait, !atomic_read(&cq->refcnt));
+	if (atomic_dec_and_test(&cq->refcnt))
+		complete(&cq->free);
+	wait_for_completion(&cq->free);
 
 	if (!cq->is_kernel)
 		ib_umem_release(cq->umem);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 1f4e18717a006da9cd3566318123f0fff1038df9..e92681878c93f4b0d4f29724700cc7ba474d7de4 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -346,9 +346,8 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
 		ibqp->event_handler(&e, ibqp->qp_context);
 	}
 	if (qp) {
-		atomic_dec(&qp->refcnt);
-		if (atomic_read(&qp->refcnt) == 0)
-			wake_up(&qp->wait);
+		if (atomic_dec_and_test(&qp->refcnt))
+			complete(&qp->free);
 	}
 }
 
@@ -373,9 +372,8 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
 		ibcq->event_handler(&e, ibcq->cq_context);
 	}
 	if (cq) {
-		atomic_dec(&cq->refcnt);
-		if (atomic_read(&cq->refcnt) == 0)
-			wake_up(&cq->wait);
+		if (atomic_dec_and_test(&cq->refcnt))
+			complete(&cq->free);
 	}
 }
 
@@ -404,7 +402,7 @@ static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
 	}
 	if (srq) {
 		if (refcount_dec_and_test(&srq->refcnt))
-			wake_up(&srq->wait);
+			complete(&srq->free);
 	}
 }
 
@@ -539,9 +537,8 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
 		if (cq && cq->ibcq.comp_handler)
 			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 		if (cq) {
-			atomic_dec(&cq->refcnt);
-			if (atomic_read(&cq->refcnt))
-				wake_up(&cq->wait);
+			if (atomic_dec_and_test(&cq->refcnt))
+				complete(&cq->free);
 		}
 		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
 	}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 10420a18d02f46dc07f1698387e11105ab76c004..4059308e1454a5bfda72a31d9931afaa50d39441 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -246,7 +246,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 		spin_lock_init(&qp->rq.lock);
 		mutex_init(&qp->mutex);
 		atomic_set(&qp->refcnt, 1);
-		init_waitqueue_head(&qp->wait);
+		init_completion(&qp->free);
 
 		qp->state = IB_QPS_RESET;
 
@@ -428,8 +428,16 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
 
 	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
 
-	atomic_dec(&qp->refcnt);
-	wait_event(qp->wait, !atomic_read(&qp->refcnt));
+	if (atomic_dec_and_test(&qp->refcnt))
+		complete(&qp->free);
+	wait_for_completion(&qp->free);
+
+	if (!qp->is_kernel) {
+		if (qp->rumem)
+			ib_umem_release(qp->rumem);
+		if (qp->sumem)
+			ib_umem_release(qp->sumem);
+	}
 
 	pvrdma_page_dir_cleanup(dev, &qp->pdir);
 
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index 826ccb864596dc19879211f9cdeefee47e536d5c..5acebb1ef631ae0070d28917530345689a2654de 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -149,7 +149,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
 
 	spin_lock_init(&srq->lock);
 	refcount_set(&srq->refcnt, 1);
-	init_waitqueue_head(&srq->wait);
+	init_completion(&srq->free);
 
 	dev_dbg(&dev->pdev->dev,
 		"create shared receive queue from user space\n");
@@ -236,8 +236,9 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
 	dev->srq_tbl[srq->srq_handle] = NULL;
 	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
 
-	refcount_dec(&srq->refcnt);
-	wait_event(srq->wait, !refcount_read(&srq->refcnt));
+	if (refcount_dec_and_test(&srq->refcnt))
+		complete(&srq->free);
+	wait_for_completion(&srq->free);
 
 	/* There is no support for kernel clients, so this is safe. */
 	ib_umem_release(srq->umem);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 3b96cdaf9a835d0faebad2b6727dde1132f109f1..e6151a29c412a36d7d9db66833ad659ab853656b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1236,13 +1236,10 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
 		ipoib_ib_dev_down(dev);
 
 	if (level == IPOIB_FLUSH_HEAVY) {
-		rtnl_lock();
 		if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
 			ipoib_ib_dev_stop(dev);
 
-		result = ipoib_ib_dev_open(dev);
-		rtnl_unlock();
-		if (result)
+		if (ipoib_ib_dev_open(dev))
 			return;
 
 		if (netif_queue_stopped(dev))
@@ -1282,7 +1279,9 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
 	struct ipoib_dev_priv *priv =
 		container_of(work, struct ipoib_dev_priv, flush_heavy);
 
+	rtnl_lock();
 	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
+	rtnl_unlock();
 }
 
 void ipoib_ib_dev_cleanup(struct net_device *dev)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 7d5eb004091d1d64f6956825e9dd14bf975a8b68..97baf88d950589afa9a712005d903fa1603eea14 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4184,7 +4184,7 @@ static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
 			       struct irq_cfg *cfg);
 
 static int irq_remapping_activate(struct irq_domain *domain,
-				  struct irq_data *irq_data, bool early)
+				  struct irq_data *irq_data, bool reserve)
 {
 	struct amd_ir_data *data = irq_data->chip_data;
 	struct irq_2_irte *irte_info = &data->irq_2_irte;
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 76a193c7fcfc69b012d3e6e1f1dd246ab8d5acc8..66f69af2c2191f6a247a82acc227ed64b85c6bff 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -1397,7 +1397,7 @@ static void intel_irq_remapping_free(struct irq_domain *domain,
 }
 
 static int intel_irq_remapping_activate(struct irq_domain *domain,
-					struct irq_data *irq_data, bool early)
+					struct irq_data *irq_data, bool reserve)
 {
 	intel_ir_reconfigure_irte(irq_data, true);
 	return 0;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 4039e64cd34211db8fac8ebc2f993c5e081e9c83..06f025fd5726f6b230d51c880e7b8accf9e8c738 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2303,7 +2303,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 }
 
 static int its_irq_domain_activate(struct irq_domain *domain,
-				   struct irq_data *d, bool early)
+				   struct irq_data *d, bool reserve)
 {
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	u32 event = its_get_event_id(d);
@@ -2818,7 +2818,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
 }
 
 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
-				       struct irq_data *d, bool early)
+				       struct irq_data *d, bool reserve)
 {
 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
 	struct its_node *its;
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 06f29cf5018a151f7d35641d90b8d043aec85516..cee59fe1321c44f9e37c9b6e5bcfc1a57fb98f41 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -342,6 +342,9 @@ static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id)
  */
 static struct lock_class_key intc_irqpin_irq_lock_class;
 
+/* And this is for the request mutex */
+static struct lock_class_key intc_irqpin_irq_request_class;
+
 static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
 				      irq_hw_number_t hw)
 {
@@ -352,7 +355,8 @@ static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
 
 	intc_irqpin_dbg(&p->irq[hw], "map");
 	irq_set_chip_data(virq, h->host_data);
-	irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class);
+	irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class,
+			      &intc_irqpin_irq_request_class);
 	irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
 	return 0;
 }
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index fd83c7f77a95d4998cff58be3d78ba467faddeab..f3654fd2eaf31b1fad6553c4f63ca04ebdbe0431 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -186,7 +186,7 @@ void led_blink_set(struct led_classdev *led_cdev,
 		   unsigned long *delay_on,
 		   unsigned long *delay_off)
 {
-	del_timer_sync(&led_cdev->blink_timer);
+	led_stop_software_blink(led_cdev);
 
 	clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags);
 	clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags);
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index 09cf3699e354415046af18c21eae3d98a445a53e..a307832d7e45fd90e046c7118d7fe505036d3cf6 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -184,6 +184,7 @@ static struct irq_chip arizona_irq_chip = {
 };
 
 static struct lock_class_key arizona_irq_lock_class;
+static struct lock_class_key arizona_irq_request_class;
 
 static int arizona_irq_map(struct irq_domain *h, unsigned int virq,
 			      irq_hw_number_t hw)
@@ -191,7 +192,8 @@ static int arizona_irq_map(struct irq_domain *h, unsigned int virq,
 	struct arizona *data = h->host_data;
 
 	irq_set_chip_data(virq, data);
-	irq_set_lockdep_class(virq, &arizona_irq_lock_class);
+	irq_set_lockdep_class(virq, &arizona_irq_lock_class,
+		&arizona_irq_request_class);
 	irq_set_chip_and_handler(virq, &arizona_irq_chip, handle_simple_irq);
 	irq_set_nested_thread(virq, 1);
 	irq_set_noprobe(virq);
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index c9714072e22465d4b23d8101038f782b084b2dca..59c82cdcf48d8a508613dbc7b1c98654285de28f 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -377,6 +377,7 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
 	u8 *ptr;
 	u8 *rx_buf;
 	u8 sum;
+	u8 rx_byte;
 	int ret = 0, final_ret;
 
 	len = cros_ec_prepare_tx(ec_dev, ec_msg);
@@ -421,25 +422,22 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
 	if (!ret) {
 		/* Verify that EC can process command */
 		for (i = 0; i < len; i++) {
-			switch (rx_buf[i]) {
-			case EC_SPI_PAST_END:
-			case EC_SPI_RX_BAD_DATA:
-			case EC_SPI_NOT_READY:
-				ret = -EAGAIN;
-				ec_msg->result = EC_RES_IN_PROGRESS;
-			default:
+			rx_byte = rx_buf[i];
+			if (rx_byte == EC_SPI_PAST_END  ||
+			    rx_byte == EC_SPI_RX_BAD_DATA ||
+			    rx_byte == EC_SPI_NOT_READY) {
+				ret = -EREMOTEIO;
 				break;
 			}
-			if (ret)
-				break;
 		}
-		if (!ret)
-			ret = cros_ec_spi_receive_packet(ec_dev,
-					ec_msg->insize + sizeof(*response));
-	} else {
-		dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
 	}
 
+	if (!ret)
+		ret = cros_ec_spi_receive_packet(ec_dev,
+				ec_msg->insize + sizeof(*response));
+	else
+		dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+
 	final_ret = terminate_request(ec_dev);
 
 	spi_bus_unlock(ec_spi->spi->master);
@@ -508,6 +506,7 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
 	int i, len;
 	u8 *ptr;
 	u8 *rx_buf;
+	u8 rx_byte;
 	int sum;
 	int ret = 0, final_ret;
 
@@ -544,25 +543,22 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
 	if (!ret) {
 		/* Verify that EC can process command */
 		for (i = 0; i < len; i++) {
-			switch (rx_buf[i]) {
-			case EC_SPI_PAST_END:
-			case EC_SPI_RX_BAD_DATA:
-			case EC_SPI_NOT_READY:
-				ret = -EAGAIN;
-				ec_msg->result = EC_RES_IN_PROGRESS;
-			default:
+			rx_byte = rx_buf[i];
+			if (rx_byte == EC_SPI_PAST_END  ||
+			    rx_byte == EC_SPI_RX_BAD_DATA ||
+			    rx_byte == EC_SPI_NOT_READY) {
+				ret = -EREMOTEIO;
 				break;
 			}
-			if (ret)
-				break;
 		}
-		if (!ret)
-			ret = cros_ec_spi_receive_response(ec_dev,
-					ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
-	} else {
-		dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
 	}
 
+	if (!ret)
+		ret = cros_ec_spi_receive_response(ec_dev,
+				ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
+	else
+		dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
+
 	final_ret = terminate_request(ec_dev);
 
 	spi_bus_unlock(ec_spi->spi->master);
@@ -667,6 +663,7 @@ static int cros_ec_spi_probe(struct spi_device *spi)
 			   sizeof(struct ec_response_get_protocol_info);
 	ec_dev->dout_size = sizeof(struct ec_host_request);
 
+	ec_spi->last_transfer_ns = ktime_get_ns();
 
 	err = cros_ec_register(ec_dev);
 	if (err) {
diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c
index da16bf45fab43ee9a946beef340f4cd2a224156e..dc94ffc6321a84dd25ce08d0f1a9374d40d4cead 100644
--- a/drivers/mfd/twl4030-audio.c
+++ b/drivers/mfd/twl4030-audio.c
@@ -159,13 +159,18 @@ unsigned int twl4030_audio_get_mclk(void)
 EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk);
 
 static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata,
-			      struct device_node *node)
+			      struct device_node *parent)
 {
+	struct device_node *node;
+
 	if (pdata && pdata->codec)
 		return true;
 
-	if (of_find_node_by_name(node, "codec"))
+	node = of_get_child_by_name(parent, "codec");
+	if (node) {
+		of_node_put(node);
 		return true;
+	}
 
 	return false;
 }
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index d66502d36ba0b3202d1c15c08540fa8aade42a32..dd19f17a1b637543965dd94e64d0d44b9178f64c 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -97,12 +97,16 @@ static struct reg_sequence twl6040_patch[] = {
 };
 
 
-static bool twl6040_has_vibra(struct device_node *node)
+static bool twl6040_has_vibra(struct device_node *parent)
 {
-#ifdef CONFIG_OF
-	if (of_find_node_by_name(node, "vibra"))
+	struct device_node *node;
+
+	node = of_get_child_by_name(parent, "vibra");
+	if (node) {
+		of_node_put(node);
 		return true;
-#endif
+	}
+
 	return false;
 }
 
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index f80e911b8843819db8dcd1956c76ce2bf60b5ab8..73b6055774474e322b07cda4144c48b5b235a55c 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1114,7 +1114,7 @@ static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
 	if (!ops->oobbuf)
 		ops->ooblen = 0;
 
-	if (offs < 0 || offs + ops->len >= mtd->size)
+	if (offs < 0 || offs + ops->len > mtd->size)
 		return -EINVAL;
 
 	if (ops->ooblen) {
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index e0eb51d8c0129937b35157ccdc107e5ef54c038a..dd56a671ea4285af0f5079bc652ecf4a32410272 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -1763,7 +1763,7 @@ try_dmaread:
 			err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
 							      addr);
 			/* erased page bitflips corrected */
-			if (err > 0)
+			if (err >= 0)
 				return err;
 		}
 
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 484f7fbc3f7d2d11cd66fc3416e64ab38d47f852..a8bde6665c24f7e20e6103959ceee16c5d3ec5c8 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -253,9 +253,9 @@ static int gpio_nand_probe(struct platform_device *pdev)
 		goto out_ce;
 	}
 
-	gpiomtd->nwp = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
-	if (IS_ERR(gpiomtd->nwp)) {
-		ret = PTR_ERR(gpiomtd->nwp);
+	gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
+	if (IS_ERR(gpiomtd->ale)) {
+		ret = PTR_ERR(gpiomtd->ale);
 		goto out_ce;
 	}
 
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 50f8d4a1b9832326070045d0c294d22393001fbd..d4d824ef64e9fb395af3bc549daae72b96731e16 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1067,9 +1067,6 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
 		return ret;
 	}
 
-	/* handle the block mark swapping */
-	block_mark_swapping(this, payload_virt, auxiliary_virt);
-
 	/* Loop over status bytes, accumulating ECC status. */
 	status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
 
@@ -1158,6 +1155,9 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
 		max_bitflips = max_t(unsigned int, max_bitflips, *status);
 	}
 
+	/* handle the block mark swapping */
+	block_mark_swapping(this, buf, auxiliary_virt);
+
 	if (oob_required) {
 		/*
 		 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index 3c63b16d485f4bb3a7587e9e6d36dd0e121668d2..d9efbc8d783b84b128379e0ce58a43b005a8ab58 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -159,6 +159,8 @@ struct arc_emac_priv {
 	unsigned int link;
 	unsigned int duplex;
 	unsigned int speed;
+
+	unsigned int rx_missed_errors;
 };
 
 /**
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 3241af1ce7182824c09ee3ad774f122565f6c940..bd277b0dc615118a58b81dfba5b040e26fa667ba 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -26,6 +26,8 @@
 
 #include "emac.h"
 
+static void arc_emac_restart(struct net_device *ndev);
+
 /**
  * arc_emac_tx_avail - Return the number of available slots in the tx ring.
  * @priv: Pointer to ARC EMAC private data structure.
@@ -210,39 +212,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
 			continue;
 		}
 
-		pktlen = info & LEN_MASK;
-		stats->rx_packets++;
-		stats->rx_bytes += pktlen;
-		skb = rx_buff->skb;
-		skb_put(skb, pktlen);
-		skb->dev = ndev;
-		skb->protocol = eth_type_trans(skb, ndev);
-
-		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
-				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
-
-		/* Prepare the BD for next cycle */
-		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
-							 EMAC_BUFFER_SIZE);
-		if (unlikely(!rx_buff->skb)) {
+		/* Prepare the BD for the next cycle. Call netif_receive_skb()
+		 * only if a new skb was allocated and mapped, to avoid holes
+		 * in the RX FIFO.
+		 */
+		skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
+		if (unlikely(!skb)) {
+			if (net_ratelimit())
+				netdev_err(ndev, "cannot allocate skb\n");
+			/* Return ownership to EMAC */
+			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
 			stats->rx_errors++;
-			/* Because receive_skb is below, increment rx_dropped */
 			stats->rx_dropped++;
 			continue;
 		}
 
-		/* receive_skb only if new skb was allocated to avoid holes */
-		netif_receive_skb(skb);
-
-		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
+		addr = dma_map_single(&ndev->dev, (void *)skb->data,
 				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
 		if (dma_mapping_error(&ndev->dev, addr)) {
 			if (net_ratelimit())
-				netdev_err(ndev, "cannot dma map\n");
-			dev_kfree_skb(rx_buff->skb);
+				netdev_err(ndev, "cannot map dma buffer\n");
+			dev_kfree_skb(skb);
+			/* Return ownership to EMAC */
+			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
 			stats->rx_errors++;
+			stats->rx_dropped++;
 			continue;
 		}
+
+		/* unmap previously mapped skb */
+		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
+
+		pktlen = info & LEN_MASK;
+		stats->rx_packets++;
+		stats->rx_bytes += pktlen;
+		skb_put(rx_buff->skb, pktlen);
+		rx_buff->skb->dev = ndev;
+		rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);
+
+		netif_receive_skb(rx_buff->skb);
+
+		rx_buff->skb = skb;
 		dma_unmap_addr_set(rx_buff, addr, addr);
 		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
 
@@ -258,6 +269,53 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
 	return work_done;
 }
 
+/**
+ * arc_emac_rx_miss_handle - handle R_MISS register
+ * @ndev:	Pointer to the net_device structure.
+ */
+static void arc_emac_rx_miss_handle(struct net_device *ndev)
+{
+	struct arc_emac_priv *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &ndev->stats;
+	unsigned int miss;
+
+	miss = arc_reg_get(priv, R_MISS);
+	if (miss) {
+		stats->rx_errors += miss;
+		stats->rx_missed_errors += miss;
+		priv->rx_missed_errors += miss;
+	}
+}
+
+/**
+ * arc_emac_rx_stall_check - check RX stall
+ * @ndev:	Pointer to the net_device structure.
+ * @budget:	How many BDs requested to process on 1 call.
+ * @work_done:	How many BDs processed
+ *
+ * Under certain conditions the EMAC stops reception of incoming packets
+ * and continuously increments the R_MISS register instead of saving data
+ * into the provided buffer. This function detects that condition and
+ * restarts the EMAC.
+ */
+static void arc_emac_rx_stall_check(struct net_device *ndev,
+				    int budget, unsigned int work_done)
+{
+	struct arc_emac_priv *priv = netdev_priv(ndev);
+	struct arc_emac_bd *rxbd;
+
+	if (work_done)
+		priv->rx_missed_errors = 0;
+
+	if (priv->rx_missed_errors && budget) {
+		rxbd = &priv->rxbd[priv->last_rx_bd];
+		if (le32_to_cpu(rxbd->info) & FOR_EMAC) {
+			arc_emac_restart(ndev);
+			priv->rx_missed_errors = 0;
+		}
+	}
+}
+
 /**
  * arc_emac_poll - NAPI poll handler.
  * @napi:	Pointer to napi_struct structure.
@@ -272,6 +330,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
 	unsigned int work_done;
 
 	arc_emac_tx_clean(ndev);
+	arc_emac_rx_miss_handle(ndev);
 
 	work_done = arc_emac_rx(ndev, budget);
 	if (work_done < budget) {
@@ -279,6 +338,8 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
 		arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
 	}
 
+	arc_emac_rx_stall_check(ndev, budget, work_done);
+
 	return work_done;
 }
 
@@ -320,6 +381,8 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
 		if (status & MSER_MASK) {
 			stats->rx_missed_errors += 0x100;
 			stats->rx_errors += 0x100;
+			priv->rx_missed_errors += 0x100;
+			napi_schedule(&priv->napi);
 		}
 
 		if (status & RXCR_MASK) {
@@ -732,6 +795,63 @@ static int arc_emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 }
 
 
+/**
+ * arc_emac_restart - Restart EMAC
+ * @ndev:	Pointer to net_device structure.
+ *
+ * This function performs a hardware reset of the EMAC in order to
+ * restore reception of network packets.
+ */
+static void arc_emac_restart(struct net_device *ndev)
+{
+	struct arc_emac_priv *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &ndev->stats;
+	int i;
+
+	if (net_ratelimit())
+		netdev_warn(ndev, "restarting stalled EMAC\n");
+
+	netif_stop_queue(ndev);
+
+	/* Disable interrupts */
+	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
+
+	/* Disable EMAC */
+	arc_reg_clr(priv, R_CTRL, EN_MASK);
+
+	/* Return the sk_buff to system */
+	arc_free_tx_queue(ndev);
+
+	/* Clean Tx BD's */
+	priv->txbd_curr = 0;
+	priv->txbd_dirty = 0;
+	memset(priv->txbd, 0, TX_RING_SZ);
+
+	for (i = 0; i < RX_BD_NUM; i++) {
+		struct arc_emac_bd *rxbd = &priv->rxbd[i];
+		unsigned int info = le32_to_cpu(rxbd->info);
+
+		if (!(info & FOR_EMAC)) {
+			stats->rx_errors++;
+			stats->rx_dropped++;
+		}
+		/* Return ownership to EMAC */
+		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
+	}
+	priv->last_rx_bd = 0;
+
+	/* Make sure info is visible to EMAC before enable */
+	wmb();
+
+	/* Enable interrupts */
+	arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
+
+	/* Enable EMAC */
+	arc_reg_or(priv, R_CTRL, EN_MASK);
+
+	netif_start_queue(ndev);
+}
+
 static const struct net_device_ops arc_emac_netdev_ops = {
 	.ndo_open		= arc_emac_open,
 	.ndo_stop		= arc_emac_stop,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4c739d5355d2279a5f1c4cd23e8ccf3288788e8d..8ae269ec17a119b419afc92eeb66dde76d43d473 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3030,7 +3030,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 
 	del_timer_sync(&bp->timer);
 
-	if (IS_PF(bp)) {
+	if (IS_PF(bp) && !BP_NOMCP(bp)) {
 		/* Set ALWAYS_ALIVE bit in shmem */
 		bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
 		bnx2x_drv_pulse(bp);
@@ -3116,7 +3116,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 	bp->cnic_loaded = false;
 
 	/* Clear driver version indication in shmem */
-	if (IS_PF(bp))
+	if (IS_PF(bp) && !BP_NOMCP(bp))
 		bnx2x_update_mng_version(bp);
 
 	/* Check if there are pending parity attentions. If there are - set
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 91e2a7560b48d572d26e8566c9a3b0667083d45d..ddd5d3ebd20111f667536aac61141b10e5bb2e7c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9578,6 +9578,15 @@ static int bnx2x_init_shmem(struct bnx2x *bp)
 
 	do {
 		bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+
+		/* If we read all 0xFFs, it means we are in a PCI error state
+		 * and should bail out to avoid crashes on adapter FW reads.
+		 */
+		if (bp->common.shmem_base == 0xFFFFFFFF) {
+			bp->flags |= NO_MCP_FLAG;
+			return -ENODEV;
+		}
+
 		if (bp->common.shmem_base) {
 			val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
 			if (val & SHR_MEM_VALIDITY_MB)
@@ -14320,7 +14329,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
 		BNX2X_ERR("IO slot reset --> driver unload\n");
 
 		/* MCP should have been reset; Need to wait for validity */
-		bnx2x_init_shmem(bp);
+		if (bnx2x_init_shmem(bp)) {
+			rtnl_unlock();
+			return PCI_ERS_RESULT_DISCONNECT;
+		}
 
 		if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
 			u32 v;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index de51c2177d03b3cf9e4653226bd8901b1d29834e..8995cfefbfcf1aa1a46f1cbd2f1bdb6722dca7bb 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4,11 +4,13 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2014 Broadcom Corporation.
+ * Copyright (C) 2005-2016 Broadcom Corporation.
+ * Copyright (C) 2016-2017 Broadcom Limited.
  *
  * Firmware is:
  *	Derived from proprietary unpublished source code,
- *	Copyright (C) 2000-2003 Broadcom Corporation.
+ *	Copyright (C) 2000-2016 Broadcom Corporation.
+ *	Copyright (C) 2016-2017 Broadcom Ltd.
  *
  *	Permission is hereby granted for the distribution of this firmware
  *	data in hexadecimal or equivalent format, provided this copyright
@@ -10052,6 +10054,16 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
 
 	tw32(GRC_MODE, tp->grc_mode | val);
 
+	/* On one of the AMD platforms, MRRS is restricted to 4000 because of
+	 * a south bridge limitation. As a workaround, the driver sets MRRS
+	 * to 2048 instead of the default 4096.
+	 */
+	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
+		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
+		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
+	}
+
 	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
 	val = tr32(GRC_MISC_CFG);
 	val &= ~0xff;
@@ -14225,7 +14237,10 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
 	 * breaks all requests to 256 bytes.
 	 */
-	if (tg3_asic_rev(tp) == ASIC_REV_57766)
+	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
+	    tg3_asic_rev(tp) == ASIC_REV_5720)
 		reset_phy = true;
 
 	err = tg3_restart_hw(tp, reset_phy);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index c2d02d02d1e6f4558365fe53f152e13e045e0277..1f0271fa7c74026edbc68c7880a44287eded7066 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -5,7 +5,8 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2014 Broadcom Corporation.
+ * Copyright (C) 2007-2016 Broadcom Corporation.
+ * Copyright (C) 2016-2017 Broadcom Limited.
  */
 
 #ifndef _T3_H
@@ -96,6 +97,7 @@
 #define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR		0x0106
 #define TG3PCI_SUBDEVICE_ID_DELL_MERLOT		0x0109
 #define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT	0x010a
+#define TG3PCI_SUBDEVICE_ID_DELL_5762		0x07f0
 #define TG3PCI_SUBVENDOR_ID_COMPAQ		PCI_VENDOR_ID_COMPAQ
 #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE	0x007c
 #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2	0x009a
@@ -281,6 +283,9 @@
 #define TG3PCI_STD_RING_PROD_IDX	0x00000098 /* 64-bit */
 #define TG3PCI_RCV_RET_RING_CON_IDX	0x000000a0 /* 64-bit */
 /* 0xa8 --> 0xb8 unused */
+#define TG3PCI_DEV_STATUS_CTRL		0x000000b4
+#define  MAX_READ_REQ_SIZE_2048		 0x00004000
+#define  MAX_READ_REQ_MASK		 0x00007000
 #define TG3PCI_DUAL_MAC_CTRL		0x000000b8
 #define  DUAL_MAC_CTRL_CH_MASK		 0x00000003
 #define  DUAL_MAC_CTRL_ID		 0x00000004
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 6105738552134809fc0d0abf8caadfa76f5e1f8a..8184d2fca9be017b4d374e1ba5b3fab6e7d77e9e 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -818,6 +818,12 @@ static void fec_enet_bd_init(struct net_device *dev)
 		for (i = 0; i < txq->bd.ring_size; i++) {
 			/* Initialize the BD for every fragment in the page. */
 			bdp->cbd_sc = cpu_to_fec16(0);
+			if (bdp->cbd_bufaddr &&
+			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+				dma_unmap_single(&fep->pdev->dev,
+						 fec32_to_cpu(bdp->cbd_bufaddr),
+						 fec16_to_cpu(bdp->cbd_datlen),
+						 DMA_TO_DEVICE);
 			if (txq->tx_skbuff[i]) {
 				dev_kfree_skb_any(txq->tx_skbuff[i]);
 				txq->tx_skbuff[i] = NULL;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index bc93b69cfd1edcf62d11cd24d41a9ca74b8f0dcc..a539263cd79ce4be8fcc0cbfe6bfdd196336cd38 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1214,6 +1214,10 @@ static void mvneta_port_disable(struct mvneta_port *pp)
 	val &= ~MVNETA_GMAC0_PORT_ENABLE;
 	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 
+	pp->link = 0;
+	pp->duplex = -1;
+	pp->speed = 0;
+
 	udelay(200);
 }
 
@@ -1958,9 +1962,9 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
 
 		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
 		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
+			mvneta_rx_error(pp, rx_desc);
 err_drop_frame:
 			dev->stats.rx_errors++;
-			mvneta_rx_error(pp, rx_desc);
 			/* leave the descriptor untouched */
 			continue;
 		}
@@ -3011,7 +3015,7 @@ static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
 {
 	int queue;
 
-	for (queue = 0; queue < txq_number; queue++)
+	for (queue = 0; queue < rxq_number; queue++)
 		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
 }
 
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 54adfd96785846f9e60a2ded11ab96bc0c196c7e..fc67e35b253e4e59c12227c3e24da9c0f5bae311 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1961,11 +1961,12 @@ static int mtk_hw_init(struct mtk_eth *eth)
 	/* set GE2 TUNE */
 	regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
 
-	/* GE1, Force 1000M/FD, FC ON */
-	mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));
-
-	/* GE2, Force 1000M/FD, FC ON */
-	mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
+	/* Set link down as the default for each GMAC. Its own MCR will be set
+	 * up with the appropriate value when mtk_phy_link_adjust() is
+	 * invoked.
+	 */
+	for (i = 0; i < MTK_MAC_COUNT; i++)
+		mtk_w32(eth, 0, MTK_MAC_MCR(i));
 
 	/* Indicates CDM to parse the MTK special tag from CPU
 	 * which also is working out for untag packets.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 1fffdebbc9e8994c70a19f4982f26d1de98be5f2..e9a1fbcc4adfa6e692902b551d0c535bfe019a9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -362,7 +362,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
 	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
 	case MLX5_CMD_OP_QUERY_Q_COUNTER:
-	case MLX5_CMD_OP_SET_RATE_LIMIT:
+	case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
 	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
 	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
 	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
@@ -505,7 +505,7 @@ const char *mlx5_command_str(int command)
 	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
-	MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
+	MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
 	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
 	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
 	MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index c0872b3284cb405583642d71a0e2e540d4804b6f..543060c305a073c0457cc31ae7318f425a0e7c49 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -82,6 +82,9 @@
 	max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
 #define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev)       MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
 #define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
+#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \
+	(cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \
+	MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev))
 
 #define MLX5_MPWRQ_LOG_WQE_SZ			18
 #define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
@@ -590,6 +593,7 @@ struct mlx5e_channel {
 	struct mlx5_core_dev      *mdev;
 	struct hwtstamp_config    *tstamp;
 	int                        ix;
+	int                        cpu;
 };
 
 struct mlx5e_channels {
@@ -935,8 +939,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
 				 u8 cq_period_mode);
 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
 				 u8 cq_period_mode);
-void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
-			      struct mlx5e_params *params, u8 rq_type);
+void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
+			       struct mlx5e_params *params,
+			       u8 rq_type);
 
 static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index c6d90b6dd80efa9a1ee82958adf5ef4dd3b4522d..9bcf38f4123b504637c080413078c23304d9e49e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -274,6 +274,7 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
 				    struct ieee_ets *ets)
 {
+	bool have_ets_tc = false;
 	int bw_sum = 0;
 	int i;
 
@@ -288,11 +289,14 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
 	}
 
 	/* Validate Bandwidth Sum */
-	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+			have_ets_tc = true;
 			bw_sum += ets->tc_tx_bw[i];
+		}
+	}
 
-	if (bw_sum != 0 && bw_sum != 100) {
+	if (have_ets_tc && bw_sum != 100) {
 		netdev_err(netdev,
 			   "Failed to validate ETS: BW sum is illegal\n");
 		return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 23425f02840581f6be591bc48cf8cccc8cc26443..8f05efa5c829bccb67ddd8b24dc2997adfe4a6c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1523,8 +1523,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
 	new_channels.params = priv->channels.params;
 	MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
 
-	mlx5e_set_rq_type_params(priv->mdev, &new_channels.params,
-				 new_channels.params.rq_wq_type);
+	new_channels.params.mpwqe_log_stride_sz =
+		MLX5E_MPWQE_STRIDE_SZ(priv->mdev, new_val);
+	new_channels.params.mpwqe_log_num_strides =
+		MLX5_MPWRQ_LOG_WQE_SZ - new_channels.params.mpwqe_log_stride_sz;
 
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
 		priv->channels.params = new_channels.params;
@@ -1536,6 +1538,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
 		return err;
 
 	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+	mlx5e_dbg(DRV, priv, "MLX5E: RxCqeCmprss was turned %s\n",
+		  MLX5E_GET_PFLAG(&priv->channels.params,
+				  MLX5E_PFLAG_RX_CQE_COMPRESS) ? "ON" : "OFF");
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d2b057a3e512c1144d741ccffd5bf47b5f138a01..d9d8227f195f0e151ba948e0622ea90a411817c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -71,11 +71,6 @@ struct mlx5e_channel_param {
 	struct mlx5e_cq_param      icosq_cq;
 };
 
-static int mlx5e_get_node(struct mlx5e_priv *priv, int ix)
-{
-	return pci_irq_get_node(priv->mdev->pdev, MLX5_EQ_VEC_COMP_BASE + ix);
-}
-
 static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
 {
 	return MLX5_CAP_GEN(mdev, striding_rq) &&
@@ -83,8 +78,8 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
 		MLX5_CAP_ETH(mdev, reg_umr_sq);
 }
 
-void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
-			      struct mlx5e_params *params, u8 rq_type)
+void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
+			       struct mlx5e_params *params, u8 rq_type)
 {
 	params->rq_wq_type = rq_type;
 	params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
@@ -93,10 +88,8 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
 		params->log_rq_size = is_kdump_kernel() ?
 			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
 			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
-		params->mpwqe_log_stride_sz =
-			MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
-			MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) :
-			MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
+		params->mpwqe_log_stride_sz = MLX5E_MPWQE_STRIDE_SZ(mdev,
+			MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
 		params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
 			params->mpwqe_log_stride_sz;
 		break;
@@ -120,13 +113,14 @@ void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
 		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
 }
 
-static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev,
+				struct mlx5e_params *params)
 {
 	u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
 		    !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
 		    MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
 		    MLX5_WQ_TYPE_LINKED_LIST;
-	mlx5e_set_rq_type_params(mdev, params, rq_type);
+	mlx5e_init_rq_type_params(mdev, params, rq_type);
 }
 
 static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@@ -444,17 +438,16 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
 	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
 	int mtt_sz = mlx5e_get_wqe_mtt_sz();
 	int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
-	int node = mlx5e_get_node(c->priv, c->ix);
 	int i;
 
 	rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
-					GFP_KERNEL, node);
+				      GFP_KERNEL, cpu_to_node(c->cpu));
 	if (!rq->mpwqe.info)
 		goto err_out;
 
 	/* We allocate more than mtt_sz as we will align the pointer */
-	rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz,
-					GFP_KERNEL, node);
+	rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
+					cpu_to_node(c->cpu));
 	if (unlikely(!rq->mpwqe.mtt_no_align))
 		goto err_free_wqe_info;
 
@@ -562,7 +555,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	int err;
 	int i;
 
-	rqp->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+	rqp->wq.db_numa_node = cpu_to_node(c->cpu);
 
 	err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
 				&rq->wq_ctrl);
@@ -629,8 +622,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	default: /* MLX5_WQ_TYPE_LINKED_LIST */
 		rq->wqe.frag_info =
 			kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
-				     GFP_KERNEL,
-				     mlx5e_get_node(c->priv, c->ix));
+				     GFP_KERNEL, cpu_to_node(c->cpu));
 		if (!rq->wqe.frag_info) {
 			err = -ENOMEM;
 			goto err_rq_wq_destroy;
@@ -1000,13 +992,13 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
 	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
 	sq->min_inline_mode = params->tx_min_inline_mode;
 
-	param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+	param->wq.db_numa_node = cpu_to_node(c->cpu);
 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
 	if (err)
 		return err;
 	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
 
-	err = mlx5e_alloc_xdpsq_db(sq, mlx5e_get_node(c->priv, c->ix));
+	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
 	if (err)
 		goto err_sq_wq_destroy;
 
@@ -1053,13 +1045,13 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 	sq->channel   = c;
 	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
 
-	param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+	param->wq.db_numa_node = cpu_to_node(c->cpu);
 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
 	if (err)
 		return err;
 	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
 
-	err = mlx5e_alloc_icosq_db(sq, mlx5e_get_node(c->priv, c->ix));
+	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
 	if (err)
 		goto err_sq_wq_destroy;
 
@@ -1126,13 +1118,13 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
 	if (MLX5_IPSEC_DEV(c->priv->mdev))
 		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
 
-	param->wq.db_numa_node = mlx5e_get_node(c->priv, c->ix);
+	param->wq.db_numa_node = cpu_to_node(c->cpu);
 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
 	if (err)
 		return err;
 	sq->wq.db    = &sq->wq.db[MLX5_SND_DBR];
 
-	err = mlx5e_alloc_txqsq_db(sq, mlx5e_get_node(c->priv, c->ix));
+	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
 	if (err)
 		goto err_sq_wq_destroy;
 
@@ -1504,8 +1496,8 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c,
 	struct mlx5_core_dev *mdev = c->priv->mdev;
 	int err;
 
-	param->wq.buf_numa_node = mlx5e_get_node(c->priv, c->ix);
-	param->wq.db_numa_node  = mlx5e_get_node(c->priv, c->ix);
+	param->wq.buf_numa_node = cpu_to_node(c->cpu);
+	param->wq.db_numa_node  = cpu_to_node(c->cpu);
 	param->eq_ix   = c->ix;
 
 	err = mlx5e_alloc_cq_common(mdev, param, cq);
@@ -1604,6 +1596,11 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
 	mlx5e_free_cq(cq);
 }
 
+static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
+{
+	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+}
+
 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
 			     struct mlx5e_params *params,
 			     struct mlx5e_channel_param *cparam)
@@ -1752,12 +1749,13 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 {
 	struct mlx5e_cq_moder icocq_moder = {0, 0};
 	struct net_device *netdev = priv->netdev;
+	int cpu = mlx5e_get_cpu(priv, ix);
 	struct mlx5e_channel *c;
 	unsigned int irq;
 	int err;
 	int eqn;
 
-	c = kzalloc_node(sizeof(*c), GFP_KERNEL, mlx5e_get_node(priv, ix));
+	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
 	if (!c)
 		return -ENOMEM;
 
@@ -1765,6 +1763,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	c->mdev     = priv->mdev;
 	c->tstamp   = &priv->tstamp;
 	c->ix       = ix;
+	c->cpu      = cpu;
 	c->pdev     = &priv->mdev->pdev->dev;
 	c->netdev   = priv->netdev;
 	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
@@ -1853,8 +1852,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
 	for (tc = 0; tc < c->num_tc; tc++)
 		mlx5e_activate_txqsq(&c->sq[tc]);
 	mlx5e_activate_rq(&c->rq);
-	netif_set_xps_queue(c->netdev,
-		mlx5_get_vector_affinity(c->priv->mdev, c->ix), c->ix);
+	netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
 }
 
 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -3679,6 +3677,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
 						     struct sk_buff *skb,
 						     netdev_features_t features)
 {
+	unsigned int offset = 0;
 	struct udphdr *udph;
 	u8 proto;
 	u16 port;
@@ -3688,7 +3687,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
 		proto = ip_hdr(skb)->protocol;
 		break;
 	case htons(ETH_P_IPV6):
-		proto = ipv6_hdr(skb)->nexthdr;
+		proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
 		break;
 	default:
 		goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 60771865c99c9bf4402d042a760887c4497e0036..e7e7cef2bde402be23b191873a5790ed23fd7843 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -466,7 +466,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
 			break;
 		case MLX5_EVENT_TYPE_CQ_ERROR:
 			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
-			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n",
+			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
 				       cqn, eqe->data.cq_err.syndrome);
 			mlx5_cq_event(dev, cqn, eqe->type);
 			break;
@@ -775,7 +775,7 @@ err1:
 	return err;
 }
 
-int mlx5_stop_eqs(struct mlx5_core_dev *dev)
+void mlx5_stop_eqs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eq_table *table = &dev->priv.eq_table;
 	int err;
@@ -784,22 +784,26 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)
 	if (MLX5_CAP_GEN(dev, pg)) {
 		err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
 		if (err)
-			return err;
+			mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n",
+				      err);
 	}
 #endif
 
 	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
 	if (err)
-		return err;
+		mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
+			      err);
 
-	mlx5_destroy_unmap_eq(dev, &table->async_eq);
+	err = mlx5_destroy_unmap_eq(dev, &table->async_eq);
+	if (err)
+		mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
+			      err);
 	mlx5_cmd_use_polling(dev);
 
 	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
 	if (err)
-		mlx5_cmd_use_events(dev);
-
-	return err;
+		mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
+			      err);
 }
 
 int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
index 3c11d6e2160abeef5a893b7b81274a7ce315368c..14962969c5ba8c4462662eeb30ef10cbe1c27fa6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.c
@@ -66,6 +66,9 @@ static int mlx5_fpga_mem_read_i2c(struct mlx5_fpga_device *fdev, size_t size,
 	u8 actual_size;
 	int err;
 
+	if (!size)
+		return -EINVAL;
+
 	if (!fdev->mdev)
 		return -ENOTCONN;
 
@@ -95,6 +98,9 @@ static int mlx5_fpga_mem_write_i2c(struct mlx5_fpga_device *fdev, size_t size,
 	u8 actual_size;
 	int err;
 
+	if (!size)
+		return -EINVAL;
+
 	if (!fdev->mdev)
 		return -ENOTCONN;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index c70fd663a63301e7e89ef9ee00d37c7075fe1a0b..dfaad9ecb2b8f155c5cdf30451c572b2d10f1d37 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -174,6 +174,8 @@ static void del_hw_fte(struct fs_node *node);
 static void del_sw_flow_table(struct fs_node *node);
 static void del_sw_flow_group(struct fs_node *node);
 static void del_sw_fte(struct fs_node *node);
+static void del_sw_prio(struct fs_node *node);
+static void del_sw_ns(struct fs_node *node);
 /* Delete rule (destination) is special case that 
  * requires to lock the FTE for all the deletion process.
  */
@@ -408,6 +410,16 @@ static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
 	return NULL;
 }
 
+static void del_sw_ns(struct fs_node *node)
+{
+	kfree(node);
+}
+
+static void del_sw_prio(struct fs_node *node)
+{
+	kfree(node);
+}
+
 static void del_hw_flow_table(struct fs_node *node)
 {
 	struct mlx5_flow_table *ft;
@@ -2064,7 +2076,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
 		return ERR_PTR(-ENOMEM);
 
 	fs_prio->node.type = FS_TYPE_PRIO;
-	tree_init_node(&fs_prio->node, NULL, NULL);
+	tree_init_node(&fs_prio->node, NULL, del_sw_prio);
 	tree_add_node(&fs_prio->node, &ns->node);
 	fs_prio->num_levels = num_levels;
 	fs_prio->prio = prio;
@@ -2090,7 +2102,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
 		return ERR_PTR(-ENOMEM);
 
 	fs_init_namespace(ns);
-	tree_init_node(&ns->node, NULL, NULL);
+	tree_init_node(&ns->node, NULL, del_sw_ns);
 	tree_add_node(&ns->node, &prio->node);
 	list_add_tail(&ns->node.list, &prio->node.children);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 1a0e797ad001ad672c954c228350ff9bcdea125b..21d29f7936f6c5d1e26c6e0d3f10644fd0f096c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -241,7 +241,7 @@ static void print_health_info(struct mlx5_core_dev *dev)
 	u32 fw;
 	int i;
 
-	/* If the syndrom is 0, the device is OK and no need to print buffer */
+	/* If the syndrome is 0, the device is OK and no need to print buffer */
 	if (!ioread8(&h->synd))
 		return;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index d2a66dc4adc6d2933cfbc60c28cd49c67716a010..8812d7208e8f3522500b3f3e971b4a7341b22c8f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -57,7 +57,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
 				   struct mlx5e_params *params)
 {
 	/* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
-	mlx5e_set_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
+	mlx5e_init_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
 
 	/* RQ size in ipoib by default is 512 */
 	params->log_rq_size = is_kdump_kernel() ?
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index f26f97fe46666ff7a3f2ce6c496cb8936ee5cbb3..582b2f18010a317600f0238163557077d4c48a39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -137,6 +137,17 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
 }
 EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
 
+static int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
+				       bool reset, void *out, int out_size)
+{
+	u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { };
+
+	MLX5_SET(query_cong_statistics_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_CONG_STATISTICS);
+	MLX5_SET(query_cong_statistics_in, in, clear, reset);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
+}
+
 static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev)
 {
 	return dev->priv.lag;
@@ -633,3 +644,48 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
 	/* If bonded, we do not add an IB device for PF1. */
 	return false;
 }
+
+int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
+				 u64 *values,
+				 int num_counters,
+				 size_t *offsets)
+{
+	int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
+	struct mlx5_core_dev *mdev[MLX5_MAX_PORTS];
+	struct mlx5_lag *ldev;
+	int num_ports;
+	int ret, i, j;
+	void *out;
+
+	out = kvzalloc(outlen, GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	memset(values, 0, sizeof(*values) * num_counters);
+
+	mutex_lock(&lag_mutex);
+	ldev = mlx5_lag_dev_get(dev);
+	if (ldev && mlx5_lag_is_bonded(ldev)) {
+		num_ports = MLX5_MAX_PORTS;
+		mdev[0] = ldev->pf[0].dev;
+		mdev[1] = ldev->pf[1].dev;
+	} else {
+		num_ports = 1;
+		mdev[0] = dev;
+	}
+
+	for (i = 0; i < num_ports; ++i) {
+		ret = mlx5_cmd_query_cong_counter(mdev[i], false, out, outlen);
+		if (ret)
+			goto unlock;
+
+		for (j = 0; j < num_counters; ++j)
+			values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
+	}
+
+unlock:
+	mutex_unlock(&lag_mutex);
+	kvfree(out);
+	return ret;
+}
+EXPORT_SYMBOL(mlx5_lag_query_cong_counters);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 5f323442cc5ac009d5006438d93183e96b85d0d9..8a89c7e8cd631f2e14cb7cbac99a8983964b7eda 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -317,9 +317,6 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
 {
 	struct mlx5_priv *priv = &dev->priv;
 	struct mlx5_eq_table *table = &priv->eq_table;
-	struct irq_affinity irqdesc = {
-		.pre_vectors = MLX5_EQ_VEC_COMP_BASE,
-	};
 	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
 	int nvec;
 
@@ -333,10 +330,9 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
 	if (!priv->irq_info)
 		goto err_free_msix;
 
-	nvec = pci_alloc_irq_vectors_affinity(dev->pdev,
+	nvec = pci_alloc_irq_vectors(dev->pdev,
 			MLX5_EQ_VEC_COMP_BASE + 1, nvec,
-			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
-			&irqdesc);
+			PCI_IRQ_MSIX);
 	if (nvec < 0)
 		return nvec;
 
@@ -622,6 +618,63 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
 	return (u64)timer_l | (u64)timer_h1 << 32;
 }
 
+static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+	struct mlx5_priv *priv  = &mdev->priv;
+	int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+
+	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+		mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
+		return -ENOMEM;
+	}
+
+	cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
+			priv->irq_info[i].mask);
+
+	if (IS_ENABLED(CONFIG_SMP) &&
+	    irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+		mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
+
+	return 0;
+}
+
+static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+	struct mlx5_priv *priv  = &mdev->priv;
+	int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+
+	irq_set_affinity_hint(irq, NULL);
+	free_cpumask_var(priv->irq_info[i].mask);
+}
+
+static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
+{
+	int err;
+	int i;
+
+	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
+		err = mlx5_irq_set_affinity_hint(mdev, i);
+		if (err)
+			goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	for (i--; i >= 0; i--)
+		mlx5_irq_clear_affinity_hint(mdev, i);
+
+	return err;
+}
+
+static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
+{
+	int i;
+
+	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
+		mlx5_irq_clear_affinity_hint(mdev, i);
+}
+
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
 		    unsigned int *irqn)
 {
@@ -1097,6 +1150,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 		goto err_stop_eqs;
 	}
 
+	err = mlx5_irq_set_affinity_hints(dev);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
+		goto err_affinity_hints;
+	}
+
 	err = mlx5_init_fs(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Failed to init flow steering\n");
@@ -1154,6 +1213,9 @@ err_sriov:
 	mlx5_cleanup_fs(dev);
 
 err_fs:
+	mlx5_irq_clear_affinity_hints(dev);
+
+err_affinity_hints:
 	free_comp_eqs(dev);
 
 err_stop_eqs:
@@ -1222,6 +1284,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 
 	mlx5_sriov_detach(dev);
 	mlx5_cleanup_fs(dev);
+	mlx5_irq_clear_affinity_hints(dev);
 	free_comp_eqs(dev);
 	mlx5_stop_eqs(dev);
 	mlx5_put_uars_page(dev, priv->uar);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index db9e665ab10474f934131b5c2a8fa2173fa5feb6..889130edb71525ecd1f46e88a11b2d3fa0ef843f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -213,8 +213,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
 err_cmd:
 	memset(din, 0, sizeof(din));
 	memset(dout, 0, sizeof(dout));
-	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
-	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
 	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
 	return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
index e651e4c02867740d35c07bfcf485860f26ad6409..d3c33e9eea7292412974802c4c38ded8898ed55c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -125,16 +125,16 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
 	return ret_entry;
 }
 
-static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
+static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
 				   u32 rate, u16 index)
 {
-	u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
 
-	MLX5_SET(set_rate_limit_in, in, opcode,
-		 MLX5_CMD_OP_SET_RATE_LIMIT);
-	MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
-	MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
+	MLX5_SET(set_pp_rate_limit_in, in, opcode,
+		 MLX5_CMD_OP_SET_PP_RATE_LIMIT);
+	MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
+	MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rate);
 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }
 
@@ -173,7 +173,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
 		entry->refcount++;
 	} else {
 		/* new rate limit */
-		err = mlx5_set_rate_limit_cmd(dev, rate, entry->index);
+		err = mlx5_set_pp_rate_limit_cmd(dev, rate, entry->index);
 		if (err) {
 			mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
 				      rate, err);
@@ -209,7 +209,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)
 	entry->refcount--;
 	if (!entry->refcount) {
 		/* need to remove rate */
-		mlx5_set_rate_limit_cmd(dev, 0, entry->index);
+		mlx5_set_pp_rate_limit_cmd(dev, 0, entry->index);
 		entry->rate = 0;
 	}
 
@@ -262,8 +262,8 @@ void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
 	/* Clear all configured rates */
 	for (i = 0; i < table->max_size; i++)
 		if (table->rl_entry[i].rate)
-			mlx5_set_rate_limit_cmd(dev, 0,
-						table->rl_entry[i].index);
+			mlx5_set_pp_rate_limit_cmd(dev, 0,
+						   table->rl_entry[i].index);
 
 	kfree(dev->priv.rl_table.rl_entry);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index 07a9ba6cfc70a11f7b4c05c73c1b32a704b7e6ba..2f74953e4561511e23d8fe3219db89104e3dd9e3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -71,9 +71,9 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
 	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
 	struct mlx5e_vxlan *vxlan;
 
-	spin_lock(&vxlan_db->lock);
+	spin_lock_bh(&vxlan_db->lock);
 	vxlan = radix_tree_lookup(&vxlan_db->tree, port);
-	spin_unlock(&vxlan_db->lock);
+	spin_unlock_bh(&vxlan_db->lock);
 
 	return vxlan;
 }
@@ -88,8 +88,12 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
 	struct mlx5e_vxlan *vxlan;
 	int err;
 
-	if (mlx5e_vxlan_lookup_port(priv, port))
+	mutex_lock(&priv->state_lock);
+	vxlan = mlx5e_vxlan_lookup_port(priv, port);
+	if (vxlan) {
+		atomic_inc(&vxlan->refcount);
 		goto free_work;
+	}
 
 	if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
 		goto free_work;
@@ -99,10 +103,11 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
 		goto err_delete_port;
 
 	vxlan->udp_port = port;
+	atomic_set(&vxlan->refcount, 1);
 
-	spin_lock_irq(&vxlan_db->lock);
+	spin_lock_bh(&vxlan_db->lock);
 	err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
-	spin_unlock_irq(&vxlan_db->lock);
+	spin_unlock_bh(&vxlan_db->lock);
 	if (err)
 		goto err_free;
 
@@ -113,35 +118,39 @@ err_free:
 err_delete_port:
 	mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
 free_work:
+	mutex_unlock(&priv->state_lock);
 	kfree(vxlan_work);
 }
 
-static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_del_port(struct work_struct *work)
 {
+	struct mlx5e_vxlan_work *vxlan_work =
+		container_of(work, struct mlx5e_vxlan_work, work);
+	struct mlx5e_priv *priv         = vxlan_work->priv;
 	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+	u16 port = vxlan_work->port;
 	struct mlx5e_vxlan *vxlan;
+	bool remove = false;
 
-	spin_lock_irq(&vxlan_db->lock);
-	vxlan = radix_tree_delete(&vxlan_db->tree, port);
-	spin_unlock_irq(&vxlan_db->lock);
-
+	mutex_lock(&priv->state_lock);
+	spin_lock_bh(&vxlan_db->lock);
+	vxlan = radix_tree_lookup(&vxlan_db->tree, port);
 	if (!vxlan)
-		return;
-
-	mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port);
-
-	kfree(vxlan);
-}
+		goto out_unlock;
 
-static void mlx5e_vxlan_del_port(struct work_struct *work)
-{
-	struct mlx5e_vxlan_work *vxlan_work =
-		container_of(work, struct mlx5e_vxlan_work, work);
-	struct mlx5e_priv *priv = vxlan_work->priv;
-	u16 port = vxlan_work->port;
+	if (atomic_dec_and_test(&vxlan->refcount)) {
+		radix_tree_delete(&vxlan_db->tree, port);
+		remove = true;
+	}
 
-	__mlx5e_vxlan_core_del_port(priv, port);
+out_unlock:
+	spin_unlock_bh(&vxlan_db->lock);
 
+	if (remove) {
+		mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+		kfree(vxlan);
+	}
+	mutex_unlock(&priv->state_lock);
 	kfree(vxlan_work);
 }
 
@@ -171,12 +180,11 @@ void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
 	struct mlx5e_vxlan *vxlan;
 	unsigned int port = 0;
 
-	spin_lock_irq(&vxlan_db->lock);
+	/* Lockless since we are the only radix-tree consumers, wq is disabled */
 	while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
 		port = vxlan->udp_port;
-		spin_unlock_irq(&vxlan_db->lock);
-		__mlx5e_vxlan_core_del_port(priv, (u16)port);
-		spin_lock_irq(&vxlan_db->lock);
+		radix_tree_delete(&vxlan_db->tree, port);
+		mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+		kfree(vxlan);
 	}
-	spin_unlock_irq(&vxlan_db->lock);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
index 5def12c048e38992e7edd9f870233e369ef4580e..5ef6ae7d568abcd1410bc403b526628a634799cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
@@ -36,6 +36,7 @@
 #include "en.h"
 
 struct mlx5e_vxlan {
+	atomic_t refcount;
 	u16 udp_port;
 };
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 72ef4f8025f00ff8810c2955b25b7f3baec49be1..be657b8533f04922a61a2f3a4b1aeddf3137cdf5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -2436,25 +2436,16 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
 	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
 }
 
-static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
-				    const struct mlxsw_sp_rif *rif)
-{
-	char rauht_pl[MLXSW_REG_RAUHT_LEN];
-
-	mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
-			     rif->rif_index, rif->addr);
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
-}
-
 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
 					 struct mlxsw_sp_rif *rif)
 {
 	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
 
-	mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
 	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
-				 rif_list_node)
+				 rif_list_node) {
+		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
 		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
+	}
 }
 
 enum mlxsw_sp_nexthop_type {
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index e379b78e86efa7c02dca2bc95afc1f79afc7800a..13190aa09faf748c16e1f00f7aee9097442aef85 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -82,10 +82,33 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
 	return nfp_net_ebpf_capable(nn) ? "BPF" : "";
 }
 
+static int
+nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
+{
+	int err;
+
+	nn->app_priv = kzalloc(sizeof(struct nfp_bpf_vnic), GFP_KERNEL);
+	if (!nn->app_priv)
+		return -ENOMEM;
+
+	err = nfp_app_nic_vnic_alloc(app, nn, id);
+	if (err)
+		goto err_free_priv;
+
+	return 0;
+err_free_priv:
+	kfree(nn->app_priv);
+	return err;
+}
+
 static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
 {
+	struct nfp_bpf_vnic *bv = nn->app_priv;
+
 	if (nn->dp.bpf_offload_xdp)
 		nfp_bpf_xdp_offload(app, nn, NULL);
+	WARN_ON(bv->tc_prog);
+	kfree(bv);
 }
 
 static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
@@ -93,6 +116,9 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
 {
 	struct tc_cls_bpf_offload *cls_bpf = type_data;
 	struct nfp_net *nn = cb_priv;
+	struct bpf_prog *oldprog;
+	struct nfp_bpf_vnic *bv;
+	int err;
 
 	if (type != TC_SETUP_CLSBPF ||
 	    !tc_can_offload(nn->dp.netdev) ||
@@ -100,8 +126,6 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
 	    cls_bpf->common.protocol != htons(ETH_P_ALL) ||
 	    cls_bpf->common.chain_index)
 		return -EOPNOTSUPP;
-	if (nn->dp.bpf_offload_xdp)
-		return -EBUSY;
 
 	/* Only support TC direct action */
 	if (!cls_bpf->exts_integrated ||
@@ -110,16 +134,25 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
 		return -EOPNOTSUPP;
 	}
 
-	switch (cls_bpf->command) {
-	case TC_CLSBPF_REPLACE:
-		return nfp_net_bpf_offload(nn, cls_bpf->prog, true);
-	case TC_CLSBPF_ADD:
-		return nfp_net_bpf_offload(nn, cls_bpf->prog, false);
-	case TC_CLSBPF_DESTROY:
-		return nfp_net_bpf_offload(nn, NULL, true);
-	default:
+	if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
 		return -EOPNOTSUPP;
+
+	bv = nn->app_priv;
+	oldprog = cls_bpf->oldprog;
+
+	/* Don't remove if oldprog doesn't match driver's state */
+	if (bv->tc_prog != oldprog) {
+		oldprog = NULL;
+		if (!cls_bpf->prog)
+			return 0;
 	}
+
+	err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog);
+	if (err)
+		return err;
+
+	bv->tc_prog = cls_bpf->prog;
+	return 0;
 }
 
 static int nfp_bpf_setup_tc_block(struct net_device *netdev,
@@ -167,7 +200,7 @@ const struct nfp_app_type app_bpf = {
 
 	.extra_cap	= nfp_bpf_extra_cap,
 
-	.vnic_alloc	= nfp_app_nic_vnic_alloc,
+	.vnic_alloc	= nfp_bpf_vnic_alloc,
 	.vnic_free	= nfp_bpf_vnic_free,
 
 	.setup_tc	= nfp_bpf_setup_tc,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 082a15f6dfb5b9ba806316bd4f272006c93749fb..57b6043177a3891c49096ab85906ee539b3d5ecb 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -172,6 +172,14 @@ struct nfp_prog {
 	struct list_head insns;
 };
 
+/**
+ * struct nfp_bpf_vnic - per-vNIC BPF priv structure
+ * @tc_prog:	currently loaded cls_bpf program
+ */
+struct nfp_bpf_vnic {
+	struct bpf_prog *tc_prog;
+};
+
 int nfp_bpf_jit(struct nfp_prog *prog);
 
 extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 70c92b649b299a1a16195e144e3da77cff25855c..38c924bdd32e46f3586eac77d5a63c361993fd09 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -253,18 +253,18 @@ static int emac_open(struct net_device *netdev)
 		return ret;
 	}
 
-	ret = emac_mac_up(adpt);
+	ret = adpt->phy.open(adpt);
 	if (ret) {
 		emac_mac_rx_tx_rings_free_all(adpt);
 		free_irq(irq->irq, irq);
 		return ret;
 	}
 
-	ret = adpt->phy.open(adpt);
+	ret = emac_mac_up(adpt);
 	if (ret) {
-		emac_mac_down(adpt);
 		emac_mac_rx_tx_rings_free_all(adpt);
 		free_irq(irq->irq, irq);
+		adpt->phy.close(adpt);
 		return ret;
 	}
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index e1e5ac0537606f2192d553c85795428b18fd615d..ce2ea2d491acac195eefe3f01f8ec9df08d3e77c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -409,7 +409,7 @@ struct stmmac_desc_ops {
 	/* get timestamp value */
 	 u64(*get_timestamp) (void *desc, u32 ats);
 	/* get rx timestamp status */
-	int (*get_rx_timestamp_status) (void *desc, u32 ats);
+	int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats);
 	/* Display ring */
 	void (*display_ring)(void *head, unsigned int size, bool rx);
 	/* set MSS via context descriptor */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 4b286e27c4ca5cdbbb7c457e31bef1b2e9e7bd94..7e089bf906b4f316034403f9a44fbfd191ee09eb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -258,7 +258,8 @@ static int dwmac4_rx_check_timestamp(void *desc)
 	return ret;
 }
 
-static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
+static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
+						 u32 ats)
 {
 	struct dma_desc *p = (struct dma_desc *)desc;
 	int ret = -EINVAL;
@@ -270,7 +271,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
 
 			/* Check if timestamp is OK from context descriptor */
 			do {
-				ret = dwmac4_rx_check_timestamp(desc);
+				ret = dwmac4_rx_check_timestamp(next_desc);
 				if (ret < 0)
 					goto exit;
 				i++;
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 7546b3664113a3d776fe19094df71b2adfb99e98..2a828a31281423082995bc332ec51a3f20989804 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -400,7 +400,8 @@ static u64 enh_desc_get_timestamp(void *desc, u32 ats)
 	return ns;
 }
 
-static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
+static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
+					    u32 ats)
 {
 	if (ats) {
 		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index f817f8f365696d3388e73f85710d30dde43a7d41..db4cee57bb2465eb98fe38cb947624e779da4673 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -265,7 +265,7 @@ static u64 ndesc_get_timestamp(void *desc, u32 ats)
 	return ns;
 }
 
-static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
+static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
 {
 	struct dma_desc *p = (struct dma_desc *)desc;
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 721b616552611aa74ea077e744ec9a0c4836a48f..08c19ebd530674972ceb9ebcb41cd7af4b3fb58d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -34,6 +34,7 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
 {
 	u32 value = readl(ioaddr + PTP_TCR);
 	unsigned long data;
+	u32 reg_value;
 
 	/* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
 	 *	formula = (1/ptp_clock) * 1000000000
@@ -50,10 +51,11 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
 
 	data &= PTP_SSIR_SSINC_MASK;
 
+	reg_value = data;
 	if (gmac4)
-		data = data << GMAC4_PTP_SSIR_SSINC_SHIFT;
+		reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT;
 
-	writel(data, ioaddr + PTP_SSIR);
+	writel(reg_value, ioaddr + PTP_SSIR);
 
 	return data;
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d7250539d0bd0c61c92fc9460c9e1197bb57ac8f..337d53d12e94b3acfe745e48422b44d1939ad2c0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -482,7 +482,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
 		desc = np;
 
 	/* Check if timestamp is available */
-	if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
+	if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) {
 		ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
 		shhwtstamp = skb_hwtstamps(skb);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index b5a8f750e4337ce04fc46b3cf784cf84a2c42c77..82104edca393b9b6662a18ef8ea0bdd8d3bb057d 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -879,6 +879,8 @@ static int m88e1510_config_init(struct phy_device *phydev)
 
 	/* SGMII-to-Copper mode initialization */
 	if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
+		u32 pause;
+
 		/* Select page 18 */
 		err = marvell_set_page(phydev, 18);
 		if (err < 0)
@@ -902,6 +904,16 @@ static int m88e1510_config_init(struct phy_device *phydev)
 		err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
 		if (err < 0)
 			return err;
+
+		/* There appears to be a bug in the 88e1512 when used in
+		 * SGMII to copper mode, where the AN advertisement register
+		 * clears the pause bits each time a negotiation occurs.
+		 * This means we can never be truly sure what was advertised,
+		 * so disable Pause support.
+		 */
+		pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+		phydev->supported &= ~pause;
+		phydev->advertising &= ~pause;
 	}
 
 	return m88e1121_config_init(phydev);
@@ -2073,7 +2085,7 @@ static struct phy_driver marvell_drivers[] = {
 		.flags = PHY_HAS_INTERRUPT,
 		.probe = marvell_probe,
 		.config_init = &m88e1145_config_init,
-		.config_aneg = &marvell_config_aneg,
+		.config_aneg = &m88e1101_config_aneg,
 		.read_status = &genphy_read_status,
 		.ack_interrupt = &marvell_ack_interrupt,
 		.config_intr = &marvell_config_intr,
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index bfd3090fb055bac4c40924205036119da5b6ce61..07c6048200c6164ac77a649468063e2fedd404c6 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -194,8 +194,11 @@ static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata)
 	}
 
 	ret = xgene_enet_ecc_init(pdata);
-	if (ret)
+	if (ret) {
+		if (pdata->dev->of_node)
+			clk_disable_unprepare(pdata->clk);
 		return ret;
+	}
 	xgene_gmac_reset(pdata);
 
 	return 0;
@@ -388,8 +391,10 @@ static int xgene_mdio_probe(struct platform_device *pdev)
 		return ret;
 
 	mdio_bus = mdiobus_alloc();
-	if (!mdio_bus)
-		return -ENOMEM;
+	if (!mdio_bus) {
+		ret = -ENOMEM;
+		goto out_clk;
+	}
 
 	mdio_bus->name = "APM X-Gene MDIO bus";
 
@@ -418,7 +423,7 @@ static int xgene_mdio_probe(struct platform_device *pdev)
 		mdio_bus->phy_mask = ~0;
 		ret = mdiobus_register(mdio_bus);
 		if (ret)
-			goto out;
+			goto out_mdiobus;
 
 		acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1,
 				    acpi_register_phy, NULL, mdio_bus, NULL);
@@ -426,16 +431,20 @@ static int xgene_mdio_probe(struct platform_device *pdev)
 	}
 
 	if (ret)
-		goto out;
+		goto out_mdiobus;
 
 	pdata->mdio_bus = mdio_bus;
 	xgene_mdio_status = true;
 
 	return 0;
 
-out:
+out_mdiobus:
 	mdiobus_free(mdio_bus);
 
+out_clk:
+	if (dev->of_node)
+		clk_disable_unprepare(pdata->clk);
+
 	return ret;
 }
 
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index ab4614113403455c1eee1c2ad69c7cebc6da5c9d..422ff6333c52da8c4a212123bdaf443945613e02 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -624,6 +624,7 @@ static int ksz9031_read_status(struct phy_device *phydev)
 		phydev->link = 0;
 		if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
 			phydev->drv->config_intr(phydev);
+		return genphy_config_aneg(phydev);
 	}
 
 	return 0;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 5dc9668dde34fe6b810c48f2bcd63e8609caa74e..827f3f92560e711a17514055cb96adfbc05708f7 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -526,6 +526,7 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
 	pl->link_config.pause = MLO_PAUSE_AN;
 	pl->link_config.speed = SPEED_UNKNOWN;
 	pl->link_config.duplex = DUPLEX_UNKNOWN;
+	pl->link_config.an_enabled = true;
 	pl->ops = ops;
 	__set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
 
@@ -951,6 +952,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
 	mutex_lock(&pl->state_mutex);
 	/* Configure the MAC to match the new settings */
 	linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising);
+	pl->link_config.interface = config.interface;
 	pl->link_config.speed = our_kset.base.speed;
 	pl->link_config.duplex = our_kset.base.duplex;
 	pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 19b9cc51079e75346af766c91786d66eaa92c3f2..31f4b7911ef84c85789011332e37c5314099d82c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2155,6 +2155,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 		}
 
 		ndst = &rt->dst;
+		if (skb_dst(skb)) {
+			int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
+
+			skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL,
+						       skb, mtu);
+		}
+
 		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
 		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
 		err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
@@ -2190,6 +2197,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 				goto out_unlock;
 		}
 
+		if (skb_dst(skb)) {
+			int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
+
+			skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL,
+						       skb, mtu);
+		}
+
 		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
 		ttl = ttl ? : ip6_dst_hoplimit(ndst);
 		skb_scrub_packet(skb, xnet);
@@ -3103,6 +3117,11 @@ static void vxlan_config_apply(struct net_device *dev,
 
 		max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
 					   VXLAN_HEADROOM);
+		if (max_mtu < ETH_MIN_MTU)
+			max_mtu = ETH_MIN_MTU;
+
+		if (!changelink && !conf->mtu)
+			dev->mtu = max_mtu;
 	}
 
 	if (dev->mtu > max_mtu)
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 10b075a46b266218c53d1e5674c1789e1e0f3d80..e8189c07b41f6b450f135ef703ec4e01568e311d 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -684,6 +684,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
 	hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN);
 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
 					 IEEE80211_STYPE_NULLFUNC |
+					 IEEE80211_FCTL_TODS |
 					 (ps ? IEEE80211_FCTL_PM : 0));
 	hdr->duration_id = cpu_to_le16(0);
 	memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
@@ -3215,7 +3216,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
 		if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info)))
 			continue;
 
-		skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+		skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
 		if (!skb) {
 			res = -ENOMEM;
 			goto out_err;
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index e949e3302af4743472ac26c68e9f4d54a27bb11a..c586bcdb5190b1c9f6447cb9a380568a03840988 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -211,12 +211,12 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
 	return ret;
 }
 
-static int btt_log_read_pair(struct arena_info *arena, u32 lane,
-			struct log_entry *ent)
+static int btt_log_group_read(struct arena_info *arena, u32 lane,
+			struct log_group *log)
 {
 	return arena_read_bytes(arena,
-			arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
-			2 * LOG_ENT_SIZE, 0);
+			arena->logoff + (lane * LOG_GRP_SIZE), log,
+			LOG_GRP_SIZE, 0);
 }
 
 static struct dentry *debugfs_root;
@@ -256,6 +256,8 @@ static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
 	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
 	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
 	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
+	debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
+	debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
 }
 
 static void btt_debugfs_init(struct btt *btt)
@@ -274,6 +276,11 @@ static void btt_debugfs_init(struct btt *btt)
 	}
 }
 
+static u32 log_seq(struct log_group *log, int log_idx)
+{
+	return le32_to_cpu(log->ent[log_idx].seq);
+}
+
 /*
  * This function accepts two log entries, and uses the
  * sequence number to find the 'older' entry.
@@ -283,8 +290,10 @@ static void btt_debugfs_init(struct btt *btt)
  *
  * TODO The logic feels a bit kludge-y. make it better..
  */
-static int btt_log_get_old(struct log_entry *ent)
+static int btt_log_get_old(struct arena_info *a, struct log_group *log)
 {
+	int idx0 = a->log_index[0];
+	int idx1 = a->log_index[1];
 	int old;
 
 	/*
@@ -292,23 +301,23 @@ static int btt_log_get_old(struct log_entry *ent)
 	 * the next time, the following logic works out to put this
 	 * (next) entry into [1]
 	 */
-	if (ent[0].seq == 0) {
-		ent[0].seq = cpu_to_le32(1);
+	if (log_seq(log, idx0) == 0) {
+		log->ent[idx0].seq = cpu_to_le32(1);
 		return 0;
 	}
 
-	if (ent[0].seq == ent[1].seq)
+	if (log_seq(log, idx0) == log_seq(log, idx1))
 		return -EINVAL;
-	if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5)
+	if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
 		return -EINVAL;
 
-	if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) {
-		if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1)
+	if (log_seq(log, idx0) < log_seq(log, idx1)) {
+		if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
 			old = 0;
 		else
 			old = 1;
 	} else {
-		if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1)
+		if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
 			old = 1;
 		else
 			old = 0;
@@ -328,17 +337,18 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
 {
 	int ret;
 	int old_ent, ret_ent;
-	struct log_entry log[2];
+	struct log_group log;
 
-	ret = btt_log_read_pair(arena, lane, log);
+	ret = btt_log_group_read(arena, lane, &log);
 	if (ret)
 		return -EIO;
 
-	old_ent = btt_log_get_old(log);
+	old_ent = btt_log_get_old(arena, &log);
 	if (old_ent < 0 || old_ent > 1) {
 		dev_err(to_dev(arena),
 				"log corruption (%d): lane %d seq [%d, %d]\n",
-			old_ent, lane, log[0].seq, log[1].seq);
+				old_ent, lane, log.ent[arena->log_index[0]].seq,
+				log.ent[arena->log_index[1]].seq);
 		/* TODO set error state? */
 		return -EIO;
 	}
@@ -346,7 +356,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
 	ret_ent = (old_flag ? old_ent : (1 - old_ent));
 
 	if (ent != NULL)
-		memcpy(ent, &log[ret_ent], LOG_ENT_SIZE);
+		memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);
 
 	return ret_ent;
 }
@@ -360,17 +370,13 @@ static int __btt_log_write(struct arena_info *arena, u32 lane,
 			u32 sub, struct log_entry *ent, unsigned long flags)
 {
 	int ret;
-	/*
-	 * Ignore the padding in log_entry for calculating log_half.
-	 * The entry is 'committed' when we write the sequence number,
-	 * and we want to ensure that that is the last thing written.
-	 * We don't bother writing the padding as that would be extra
-	 * media wear and write amplification
-	 */
-	unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2;
-	u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE);
+	u32 group_slot = arena->log_index[sub];
+	unsigned int log_half = LOG_ENT_SIZE / 2;
 	void *src = ent;
+	u64 ns_off;
 
+	ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
+		(group_slot * LOG_ENT_SIZE);
 	/* split the 16B write into atomic, durable halves */
 	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
 	if (ret)
@@ -453,7 +459,7 @@ static int btt_log_init(struct arena_info *arena)
 {
 	size_t logsize = arena->info2off - arena->logoff;
 	size_t chunk_size = SZ_4K, offset = 0;
-	struct log_entry log;
+	struct log_entry ent;
 	void *zerobuf;
 	int ret;
 	u32 i;
@@ -485,11 +491,11 @@ static int btt_log_init(struct arena_info *arena)
 	}
 
 	for (i = 0; i < arena->nfree; i++) {
-		log.lba = cpu_to_le32(i);
-		log.old_map = cpu_to_le32(arena->external_nlba + i);
-		log.new_map = cpu_to_le32(arena->external_nlba + i);
-		log.seq = cpu_to_le32(LOG_SEQ_INIT);
-		ret = __btt_log_write(arena, i, 0, &log, 0);
+		ent.lba = cpu_to_le32(i);
+		ent.old_map = cpu_to_le32(arena->external_nlba + i);
+		ent.new_map = cpu_to_le32(arena->external_nlba + i);
+		ent.seq = cpu_to_le32(LOG_SEQ_INIT);
+		ret = __btt_log_write(arena, i, 0, &ent, 0);
 		if (ret)
 			goto free;
 	}
@@ -594,6 +600,123 @@ static int btt_freelist_init(struct arena_info *arena)
 	return 0;
 }
 
+static bool ent_is_padding(struct log_entry *ent)
+{
+	return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
+		&& (ent->seq == 0);
+}
+
+/*
+ * Detecting valid log indices: We read a log group (see the comments in btt.h
+ * for a description of a 'log_group' and its 'slots'), and iterate over its
+ * four slots. We expect that a padding slot will be all-zeroes, and use this
+ * to detect a padding slot vs. an actual entry.
+ *
+ * If a log_group is in the initial state, i.e. hasn't been used since the
+ * creation of this BTT layout, it will have three of the four slots with
+ * zeroes. We skip over these log_groups for the detection of log_index. If
+ * all log_groups are in the initial state (i.e. the BTT has never been
+ * written to), it is safe to assume the 'new format' of log entries in slots
+ * (0, 1).
+ */
+static int log_set_indices(struct arena_info *arena)
+{
+	bool idx_set = false, initial_state = true;
+	int ret, log_index[2] = {-1, -1};
+	u32 i, j, next_idx = 0;
+	struct log_group log;
+	u32 pad_count = 0;
+
+	for (i = 0; i < arena->nfree; i++) {
+		ret = btt_log_group_read(arena, i, &log);
+		if (ret < 0)
+			return ret;
+
+		for (j = 0; j < 4; j++) {
+			if (!idx_set) {
+				if (ent_is_padding(&log.ent[j])) {
+					pad_count++;
+					continue;
+				} else {
+					/* Skip if index has been recorded */
+					if ((next_idx == 1) &&
+						(j == log_index[0]))
+						continue;
+					/* valid entry, record index */
+					log_index[next_idx] = j;
+					next_idx++;
+				}
+				if (next_idx == 2) {
+					/* two valid entries found */
+					idx_set = true;
+				} else if (next_idx > 2) {
+					/* too many valid indices */
+					return -ENXIO;
+				}
+			} else {
+				/*
+				 * once the indices have been set, just verify
+				 * that all subsequent log groups are either in
+				 * their initial state or follow the same
+				 * indices.
+				 */
+				if (j == log_index[0]) {
+					/* entry must be 'valid' */
+					if (ent_is_padding(&log.ent[j]))
+						return -ENXIO;
+				} else if (j == log_index[1]) {
+					;
+					/*
+					 * log_index[1] can be padding if the
+					 * lane never got used and it is still
+					 * in the initial state (three 'padding'
+					 * entries)
+					 */
+				} else {
+					/* entry must be invalid (padding) */
+					if (!ent_is_padding(&log.ent[j]))
+						return -ENXIO;
+				}
+			}
+		}
+		/*
+		 * If any of the log_groups has more than one valid,
+		 * non-padding entry, then we are no longer in the
+		 * initial_state.
+		 */
+		if (pad_count < 3)
+			initial_state = false;
+		pad_count = 0;
+	}
+
+	if (!initial_state && !idx_set)
+		return -ENXIO;
+
+	/*
+	 * If all the entries in the log were in the initial state,
+	 * assume new padding scheme
+	 */
+	if (initial_state)
+		log_index[1] = 1;
+
+	/*
+	 * Only allow the known permutations of log/padding indices,
+	 * i.e. (0, 1), and (0, 2)
+	 */
+	if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
+		; /* known index possibilities */
+	else {
+		dev_err(to_dev(arena), "Found an unknown padding scheme\n");
+		return -ENXIO;
+	}
+
+	arena->log_index[0] = log_index[0];
+	arena->log_index[1] = log_index[1];
+	dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
+	dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
+	return 0;
+}
+
 static int btt_rtt_init(struct arena_info *arena)
 {
 	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
@@ -650,8 +773,7 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
 	available -= 2 * BTT_PG_SIZE;
 
 	/* The log takes a fixed amount of space based on nfree */
-	logsize = roundup(2 * arena->nfree * sizeof(struct log_entry),
-				BTT_PG_SIZE);
+	logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
 	available -= logsize;
 
 	/* Calculate optimal split between map and data area */
@@ -668,6 +790,10 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
 	arena->mapoff = arena->dataoff + datasize;
 	arena->logoff = arena->mapoff + mapsize;
 	arena->info2off = arena->logoff + logsize;
+
+	/* Default log indices are (0,1) */
+	arena->log_index[0] = 0;
+	arena->log_index[1] = 1;
 	return arena;
 }
 
@@ -758,6 +884,13 @@ static int discover_arenas(struct btt *btt)
 		arena->external_lba_start = cur_nlba;
 		parse_arena_meta(arena, super, cur_off);
 
+		ret = log_set_indices(arena);
+		if (ret) {
+			dev_err(to_dev(arena),
+				"Unable to deduce log/padding indices\n");
+			goto out;
+		}
+
 		mutex_init(&arena->err_lock);
 		ret = btt_freelist_init(arena);
 		if (ret)
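
The 'older entry' selection performed by btt_log_get_old() above follows one rule: valid sequence numbers cycle 1 -> 2 -> 3 -> 1, so the slot whose number is exactly one behind the other is the older one, and a wrapped pair such as (1, 3) means the slot holding 3 is the older. A minimal user-space sketch of that rule (illustrative only, not driver code; the out-of-range check stands in for the driver's stricter sum test):

	#include <stdio.h>

	/*
	 * Pick the "older" of two BTT log slots from their sequence numbers.
	 * Returns 0 or 1, or -1 if the pair looks corrupted.
	 */
	static int pick_old_slot(unsigned int seq0, unsigned int seq1)
	{
		if (seq0 == 0)		/* lane never written: slot 0 counts as old */
			return 0;
		if (seq0 == seq1 || seq0 > 3 || seq1 > 3)
			return -1;	/* equal or out-of-range: corruption */
		if (seq0 < seq1)
			return (seq1 - seq0 == 1) ? 0 : 1;
		return (seq0 - seq1 == 1) ? 1 : 0;
	}

	int main(void)
	{
		/* (1, 2): slot 0 is older; (1, 3): wrap-around, slot 1 is older */
		printf("%d %d\n", pick_old_slot(1, 2), pick_old_slot(1, 3));
		return 0;
	}
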
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h
index 578c2057524d396fbf7c2eee88804b58c1f17cfe..db3cb6d4d0d495df8978494ff6619e2923478d32 100644
--- a/drivers/nvdimm/btt.h
+++ b/drivers/nvdimm/btt.h
@@ -27,6 +27,7 @@
 #define MAP_ERR_MASK (1 << MAP_ERR_SHIFT)
 #define MAP_LBA_MASK (~((1 << MAP_TRIM_SHIFT) | (1 << MAP_ERR_SHIFT)))
 #define MAP_ENT_NORMAL 0xC0000000
+#define LOG_GRP_SIZE sizeof(struct log_group)
 #define LOG_ENT_SIZE sizeof(struct log_entry)
 #define ARENA_MIN_SIZE (1UL << 24)	/* 16 MB */
 #define ARENA_MAX_SIZE (1ULL << 39)	/* 512 GB */
@@ -50,12 +51,52 @@ enum btt_init_state {
 	INIT_READY
 };
 
+/*
+ * A log group represents one log 'lane', and consists of four log entries.
+ * Two of the four entries are valid entries, and the remaining two are
+ * padding. Due to an old bug in the padding location, we need to perform a
+ * test to determine the padding scheme being used, and use that scheme
+ * thereafter.
+ *
+ * In kernels prior to 4.15, a 'log group' would have its actual log entries
+ * at indices (0, 2) and padding at indices (1, 3), whereas the correct/updated
+ * format has log entries at indices (0, 1) and padding at indices (2, 3).
+ *
+ * Old (pre 4.15) format:
+ * +-----------------+-----------------+
+ * |      ent[0]     |      ent[1]     |
+ * |       16B       |       16B       |
+ * | lba/old/new/seq |       pad       |
+ * +-----------------------------------+
+ * |      ent[2]     |      ent[3]     |
+ * |       16B       |       16B       |
+ * | lba/old/new/seq |       pad       |
+ * +-----------------+-----------------+
+ *
+ * New format:
+ * +-----------------+-----------------+
+ * |      ent[0]     |      ent[1]     |
+ * |       16B       |       16B       |
+ * | lba/old/new/seq | lba/old/new/seq |
+ * +-----------------------------------+
+ * |      ent[2]     |      ent[3]     |
+ * |       16B       |       16B       |
+ * |       pad       |       pad       |
+ * +-----------------+-----------------+
+ *
+ * We detect during start-up which format is in use, and set
+ * arena->log_index[0] and arena->log_index[1] to the detected slots.
+ */
+
 struct log_entry {
 	__le32 lba;
 	__le32 old_map;
 	__le32 new_map;
 	__le32 seq;
-	__le64 padding[2];
+};
+
+struct log_group {
+	struct log_entry ent[4];
 };
 
 struct btt_sb {
@@ -125,6 +166,8 @@ struct aligned_lock {
  * @list:		List head for list of arenas
  * @debugfs_dir:	Debugfs dentry
  * @flags:		Arena flags - may signify error states.
+ * @err_lock:		Mutex for synchronizing error clearing.
+ * @log_index:		Indices of the valid log entries in a log_group
  *
  * arena_info is a per-arena handle. Once an arena is narrowed down for an
  * IO, this struct is passed around for the duration of the IO.
@@ -157,6 +200,7 @@ struct arena_info {
 	/* Arena flags */
 	u32 flags;
 	struct mutex err_lock;
+	int log_index[2];
 };
 
 /**
@@ -176,6 +220,7 @@ struct arena_info {
  * @init_lock:		Mutex used for the BTT initialization
  * @init_state:		Flag describing the initialization state for the BTT
  * @num_arenas:		Number of arenas in the BTT instance
+ * @phys_bb:		Pointer to the namespace's badblocks structure
  */
 struct btt {
 	struct gendisk *btt_disk;
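
Given that a padding slot reads back as all zeroes (as stated above), a single log group can be classified by which of its slots are non-zero. The snippet below is only a sketch of that idea; the driver's log_set_indices() additionally walks every lane and cross-checks each group against the first detected layout:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct ent { uint32_t lba, old_map, new_map, seq; };

	static bool is_pad(const struct ent *e)
	{
		return !e->lba && !e->old_map && !e->new_map && !e->seq;
	}

	/*
	 * Returns 1 for the new layout (entries in slots 0,1), 2 for the old
	 * layout (entries in slots 0,2), 0 for a group still in its initial
	 * state (only slot 0 written), and -1 for anything unexpected.
	 */
	static int classify_group(const struct ent grp[4])
	{
		bool p1 = is_pad(&grp[1]), p2 = is_pad(&grp[2]), p3 = is_pad(&grp[3]);

		if (is_pad(&grp[0]))
			return -1;	/* slot 0 always holds an entry */
		if (p1 && p2 && p3)
			return 0;
		if (!p1 && p2 && p3)
			return 1;
		if (p1 && !p2 && p3)
			return 2;
		return -1;
	}

	int main(void)
	{
		struct ent used_new[4] = { { 5, 7, 8, 1 }, { 5, 8, 9, 2 }, { 0 }, { 0 } };
		struct ent used_old[4] = { { 5, 7, 8, 1 }, { 0 }, { 5, 8, 9, 2 }, { 0 } };

		printf("%d %d\n", classify_group(used_new), classify_group(used_old));
		return 0;
	}
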
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 65cc171c721de8774baf05f9650f3bbacf511eec..2adada1a58551776186d6f6928a437d462734a48 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -364,9 +364,9 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
 int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 {
 	u64 checksum, offset;
-	unsigned long align;
 	enum nd_pfn_mode mode;
 	struct nd_namespace_io *nsio;
+	unsigned long align, start_pad;
 	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
 	struct nd_namespace_common *ndns = nd_pfn->ndns;
 	const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev);
@@ -410,6 +410,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 
 	align = le32_to_cpu(pfn_sb->align);
 	offset = le64_to_cpu(pfn_sb->dataoff);
+	start_pad = le32_to_cpu(pfn_sb->start_pad);
 	if (align == 0)
 		align = 1UL << ilog2(offset);
 	mode = le32_to_cpu(pfn_sb->mode);
@@ -468,7 +469,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 		return -EBUSY;
 	}
 
-	if ((align && !IS_ALIGNED(offset, align))
+	if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align))
 			|| !IS_ALIGNED(offset, PAGE_SIZE)) {
 		dev_err(&nd_pfn->dev,
 				"bad offset: %#llx dax disabled align: %#lx\n",
@@ -582,6 +583,12 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
 	return altmap;
 }
 
+static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
+{
+	return min_t(u64, PHYS_SECTION_ALIGN_DOWN(phys),
+			ALIGN_DOWN(phys, nd_pfn->align));
+}
+
 static int nd_pfn_init(struct nd_pfn *nd_pfn)
 {
 	u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
@@ -637,13 +644,16 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	start = nsio->res.start;
 	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
 	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-				IORES_DESC_NONE) == REGION_MIXED) {
+				IORES_DESC_NONE) == REGION_MIXED
+			|| !IS_ALIGNED(start + resource_size(&nsio->res),
+				nd_pfn->align)) {
 		size = resource_size(&nsio->res);
-		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
+		end_trunc = start + size - phys_pmem_align_down(nd_pfn,
+				start + size);
 	}
 
 	if (start_pad + end_trunc)
-		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
+		dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
 				dev_name(&ndns->dev), start_pad + end_trunc);
 
 	/*
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f837d666cbd499c8e33a1514f55344a1796005a1..1e46e60b8f1080e339ebe81c1710dabb23afef75 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1287,7 +1287,7 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl,
 	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
 			NVME_DSM_MAX_RANGES);
 
-	queue->limits.discard_alignment = size;
+	queue->limits.discard_alignment = 0;
 	queue->limits.discard_granularity = size;
 
 	blk_queue_max_discard_sectors(queue, UINT_MAX);
@@ -1705,7 +1705,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
 		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
 	}
-	if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
+	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
+	    is_power_of_2(ctrl->max_hw_sectors))
 		blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
 	blk_queue_virt_boundary(q, ctrl->page_size - 1);
 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
@@ -2869,7 +2870,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
 	nvme_set_queue_limits(ctrl, ns->queue);
-	nvme_setup_streams_ns(ctrl, ns);
 
 	id = nvme_identify_ns(ctrl, nsid);
 	if (!id)
@@ -2880,6 +2880,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 	if (nvme_init_ns_head(ns, nsid, id, &new))
 		goto out_free_id;
+	nvme_setup_streams_ns(ctrl, ns);
 	
 #ifdef CONFIG_NVME_MULTIPATH
 	/*
@@ -2965,8 +2966,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 		return;
 
 	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
-		if (blk_get_integrity(ns->disk))
-			blk_integrity_unregister(ns->disk);
 		nvme_mpath_remove_disk_links(ns);
 		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
 					&nvme_ns_id_attr_group);
@@ -2974,6 +2973,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 			nvme_nvm_unregister_sysfs(ns);
 		del_gendisk(ns->disk);
 		blk_cleanup_queue(ns->queue);
+		if (blk_get_integrity(ns->disk))
+			blk_integrity_unregister(ns->disk);
 	}
 
 	mutex_lock(&ns->ctrl->subsys->lock);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 0a8af4daef8903f8ba983d345f1044498c57a975..794e66e4aa20115f4dc3a6b5fc12f706b2040bf4 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3221,7 +3221,6 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 		/* initiate nvme ctrl ref counting teardown */
 		nvme_uninit_ctrl(&ctrl->ctrl);
-		nvme_put_ctrl(&ctrl->ctrl);
 
 		/* Remove core ctrl ref. */
 		nvme_put_ctrl(&ctrl->ctrl);
diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c
index a346b4923550829eca8c7a8176c4ad39dbd54f7c..41d3a3c1104ec7f810c7f9c33a2cab414c475299 100644
--- a/drivers/nvmem/meson-mx-efuse.c
+++ b/drivers/nvmem/meson-mx-efuse.c
@@ -156,8 +156,8 @@ static int meson_mx_efuse_read(void *context, unsigned int offset,
 				 MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE,
 				 MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE);
 
-	for (i = offset; i < offset + bytes; i += efuse->config.word_size) {
-		addr = i / efuse->config.word_size;
+	for (i = 0; i < bytes; i += efuse->config.word_size) {
+		addr = (offset + i) / efuse->config.word_size;
 
 		err = meson_mx_efuse_read_addr(efuse, addr, &tmp);
 		if (err)
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index a25fed52f7e94de4bd3dd5cb8b0922e1df8e81bf..41b740aed3a346e4bbc610959281649447f83bd4 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1692,3 +1692,36 @@ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
 	iounmap(base_addr);
 }
 
+
+/*
+ * The design of the Diva management card in rp34x0 machines (rp3410, rp3440)
+ * seems rushed, so that many built-in components simply don't work.
+ * The following quirks disable the serial AUX port and the built-in ATI RV100
+ * Radeon 7000 graphics card, neither of which has any external connectors
+ * and both of which are therefore useless. Even worse, the AUX port occupies
+ * ttyS0, which makes these the only PARISC machines on which ttyS0 cannot be
+ * used as the boot console.
+ */
+static void quirk_diva_ati_card(struct pci_dev *dev)
+{
+	if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+	    dev->subsystem_device != 0x1292)
+		return;
+
+	dev_info(&dev->dev, "Hiding Diva built-in ATI card");
+	dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY,
+	quirk_diva_ati_card);
+
+static void quirk_diva_aux_disable(struct pci_dev *dev)
+{
+	if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
+	    dev->subsystem_device != 0x1291)
+		return;
+
+	dev_info(&dev->dev, "Hiding Diva built-in AUX serial device");
+	dev->device = 0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
+	quirk_diva_aux_disable);
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 04dac6a42c9f287519379757a0cefd3c08e14e11..6b8d060d07de7d8ba2fc66dd3a0b0087470f4b82 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -985,9 +985,7 @@ static u32 hv_compose_msi_req_v1(
 	int_pkt->wslot.slot = slot;
 	int_pkt->int_desc.vector = vector;
 	int_pkt->int_desc.vector_count = 1;
-	int_pkt->int_desc.delivery_mode =
-		(apic->irq_delivery_mode == dest_LowestPrio) ?
-			dest_LowestPrio : dest_Fixed;
+	int_pkt->int_desc.delivery_mode = dest_Fixed;
 
 	/*
 	 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
@@ -1008,9 +1006,7 @@ static u32 hv_compose_msi_req_v2(
 	int_pkt->wslot.slot = slot;
 	int_pkt->int_desc.vector = vector;
 	int_pkt->int_desc.vector_count = 1;
-	int_pkt->int_desc.delivery_mode =
-		(apic->irq_delivery_mode == dest_LowestPrio) ?
-			dest_LowestPrio : dest_Fixed;
+	int_pkt->int_desc.delivery_mode = dest_Fixed;
 
 	/*
 	 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 945099d49f8f9b08c2b159d2cf4899b1fdc4cea4..14fd865a512096393149fd63a3707305648f276f 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1012,7 +1012,12 @@ static int pci_pm_thaw_noirq(struct device *dev)
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume_early(dev);
 
-	pci_update_current_state(pci_dev, PCI_D0);
+	/*
+	 * pci_restore_state() requires the device to be in D0 (because of MSI
+	 * restoration among other things), so force it into D0 in case the
+	 * driver's "freeze" callbacks put it into a low-power state directly.
+	 */
+	pci_set_power_state(pci_dev, PCI_D0);
 	pci_restore_state(pci_dev);
 
 	if (drv && drv->pm && drv->pm->thaw_noirq)
diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
index accaaaccb662fd24b1b3cf5f8e22154f5f46f9eb..6601ad0dfb3ad23c77865138b3db7bd0c4f55ae8 100644
--- a/drivers/phy/motorola/phy-cpcap-usb.c
+++ b/drivers/phy/motorola/phy-cpcap-usb.c
@@ -310,7 +310,7 @@ static int cpcap_usb_init_irq(struct platform_device *pdev,
 	int irq, error;
 
 	irq = platform_get_irq_byname(pdev, name);
-	if (!irq)
+	if (irq < 0)
 		return -ENODEV;
 
 	error = devm_request_threaded_irq(ddata->dev, irq, NULL,
diff --git a/drivers/phy/renesas/Kconfig b/drivers/phy/renesas/Kconfig
index cb09245e9b4c730528a7019b44f89c5abd20c7d9..c845facacb063e9a0bd2b244869d3c22c3409fbf 100644
--- a/drivers/phy/renesas/Kconfig
+++ b/drivers/phy/renesas/Kconfig
@@ -12,7 +12,9 @@ config PHY_RCAR_GEN3_USB2
 	tristate "Renesas R-Car generation 3 USB 2.0 PHY driver"
 	depends on ARCH_RENESAS
 	depends on EXTCON
+	depends on USB_SUPPORT
 	select GENERIC_PHY
+	select USB_COMMON
 	help
 	  Support for USB 2.0 PHY found on Renesas R-Car generation 3 SoCs.
 
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index ee85fa0ca4b05bac340121cd798291708091e59e..7492c8978217f45e04f591898bfb0ddbaac2a217 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -1137,6 +1137,7 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
 		if (IS_ERR(phy)) {
 			dev_err(dev, "failed to create phy: %s\n",
 				child_np->name);
+			pm_runtime_disable(dev);
 			return PTR_ERR(phy);
 		}
 
@@ -1146,6 +1147,7 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
 	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
 	if (IS_ERR(phy_provider)) {
 		dev_err(dev, "Failed to register phy provider\n");
+		pm_runtime_disable(dev);
 		return PTR_ERR(phy_provider);
 	}
 
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 4307bf0013e186cd859b779aaaf4c61ddd662a6c..63e916d4d0696156b244fb2c7e0e5f6de606c8e3 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -75,14 +75,14 @@ MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match);
 static struct device_node *
 tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name)
 {
-	/*
-	 * of_find_node_by_name() drops a reference, so make sure to grab one.
-	 */
-	struct device_node *np = of_node_get(padctl->dev->of_node);
+	struct device_node *pads, *np;
+
+	pads = of_get_child_by_name(padctl->dev->of_node, "pads");
+	if (!pads)
+		return NULL;
 
-	np = of_find_node_by_name(np, "pads");
-	if (np)
-		np = of_find_node_by_name(np, name);
+	np = of_get_child_by_name(pads, name);
+	of_node_put(pads);
 
 	return np;
 }
@@ -90,16 +90,16 @@ tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name)
 static struct device_node *
 tegra_xusb_pad_find_phy_node(struct tegra_xusb_pad *pad, unsigned int index)
 {
-	/*
-	 * of_find_node_by_name() drops a reference, so make sure to grab one.
-	 */
-	struct device_node *np = of_node_get(pad->dev.of_node);
+	struct device_node *np, *lanes;
 
-	np = of_find_node_by_name(np, "lanes");
-	if (!np)
+	lanes = of_get_child_by_name(pad->dev.of_node, "lanes");
+	if (!lanes)
 		return NULL;
 
-	return of_find_node_by_name(np, pad->soc->lanes[index].name);
+	np = of_get_child_by_name(lanes, pad->soc->lanes[index].name);
+	of_node_put(lanes);
+
+	return np;
 }
 
 static int
@@ -195,7 +195,7 @@ int tegra_xusb_pad_register(struct tegra_xusb_pad *pad,
 	unsigned int i;
 	int err;
 
-	children = of_find_node_by_name(pad->dev.of_node, "lanes");
+	children = of_get_child_by_name(pad->dev.of_node, "lanes");
 	if (!children)
 		return -ENODEV;
 
@@ -444,21 +444,21 @@ static struct device_node *
 tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type,
 			  unsigned int index)
 {
-	/*
-	 * of_find_node_by_name() drops a reference, so make sure to grab one.
-	 */
-	struct device_node *np = of_node_get(padctl->dev->of_node);
+	struct device_node *ports, *np;
+	char *name;
 
-	np = of_find_node_by_name(np, "ports");
-	if (np) {
-		char *name;
+	ports = of_get_child_by_name(padctl->dev->of_node, "ports");
+	if (!ports)
+		return NULL;
 
-		name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
-		if (!name)
-			return ERR_PTR(-ENOMEM);
-		np = of_find_node_by_name(np, name);
-		kfree(name);
+	name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
+	if (!name) {
+		of_node_put(ports);
+		return ERR_PTR(-ENOMEM);
 	}
+	np = of_get_child_by_name(ports, name);
+	kfree(name);
+	of_node_put(ports);
 
 	return np;
 }
@@ -847,7 +847,7 @@ static void tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl)
 
 static int tegra_xusb_padctl_probe(struct platform_device *pdev)
 {
-	struct device_node *np = of_node_get(pdev->dev.of_node);
+	struct device_node *np = pdev->dev.of_node;
 	const struct tegra_xusb_padctl_soc *soc;
 	struct tegra_xusb_padctl *padctl;
 	const struct of_device_id *match;
@@ -855,7 +855,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
 	int err;
 
 	/* for backwards compatibility with old device trees */
-	np = of_find_node_by_name(np, "pads");
+	np = of_get_child_by_name(np, "pads");
 	if (!np) {
 		dev_warn(&pdev->dev, "deprecated DT, using legacy driver\n");
 		return tegra_xusb_padctl_legacy_probe(pdev);
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index bdedb6325c72a5fa0966377d53678722d18f07eb..4471fd94e1fe1f48b953360ad76e638e88f1a7ff 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1620,6 +1620,22 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
 			clear_bit(i, chip->irq.valid_mask);
 	}
 
+	/*
+	 * The same set of machines in chv_no_valid_mask[] has incorrectly
+	 * configured GPIOs that generate spurious interrupts, so we use
+	 * the same list to apply another quirk to them.
+	 *
+	 * See also https://bugzilla.kernel.org/show_bug.cgi?id=197953.
+	 */
+	if (!need_valid_mask) {
+		/*
+		 * Mask all interrupts the community is able to generate
+		 * but leave the ones that can only generate GPEs unmasked.
+		 */
+		chv_writel(GENMASK(31, pctrl->community->nirqs),
+			   pctrl->regs + CHV_INTMASK);
+	}
+
 	/* Clear all interrupts */
 	chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
 
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index e6cd8de793e2ae66c791389ff686b42ad8084d84..3501491e5bfc8a5547c3ac820e01ad338dbb097b 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -222,6 +222,9 @@ static enum pin_config_param pcs_bias[] = {
  */
 static struct lock_class_key pcs_lock_class;
 
+/* Class for the IRQ request mutex */
+static struct lock_class_key pcs_request_class;
+
 /*
  * REVISIT: Reads and writes could eventually use regmap or something
  * generic. But at least on omaps, some mux registers are performance
@@ -1486,7 +1489,7 @@ static int pcs_irqdomain_map(struct irq_domain *d, unsigned int irq,
 	irq_set_chip_data(irq, pcs_soc);
 	irq_set_chip_and_handler(irq, &pcs->chip,
 				 handle_level_irq);
-	irq_set_lockdep_class(irq, &pcs_lock_class);
+	irq_set_lockdep_class(irq, &pcs_lock_class, &pcs_request_class);
 	irq_set_noprobe(irq);
 
 	return 0;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index a276c61be217b41b1ce35d33875d8a532033eff0..e62ab087bfd8afd788236d11137405d4abfca3f5 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -290,7 +290,7 @@ static int stm32_gpio_domain_translate(struct irq_domain *d,
 }
 
 static int stm32_gpio_domain_activate(struct irq_domain *d,
-				      struct irq_data *irq_data, bool early)
+				      struct irq_data *irq_data, bool reserve)
 {
 	struct stm32_gpio_bank *bank = d->host_data;
 	struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 6c815207f4f50470c1a0fea8992183aa67f69e37..3614df68830f8f6a4abd756c52ca4e1e72e8e1d1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5386,6 +5386,13 @@ out:
 }
 EXPORT_SYMBOL_GPL(qeth_poll);
 
+static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
+{
+	if (!cmd->hdr.return_code)
+		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
+	return cmd->hdr.return_code;
+}
+
 int qeth_setassparms_cb(struct qeth_card *card,
 			struct qeth_reply *reply, unsigned long data)
 {
@@ -6242,7 +6249,7 @@ static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
 				(struct qeth_checksum_cmd *)reply->param;
 
 	QETH_CARD_TEXT(card, 4, "chkdoccb");
-	if (cmd->hdr.return_code)
+	if (qeth_setassparms_inspect_rc(cmd))
 		return 0;
 
 	memset(chksum_cb, 0, sizeof(*chksum_cb));
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 6e3d81969a77cc895580f79fa8e3aaa3b8bb4fee..d52265416da2af0da11cca770304f33ab203ad20 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1725,6 +1725,7 @@ struct aac_dev
 #define FIB_CONTEXT_FLAG_NATIVE_HBA		(0x00000010)
 #define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF	(0x00000020)
 #define FIB_CONTEXT_FLAG_SCSI_CMD	(0x00000040)
+#define FIB_CONTEXT_FLAG_EH_RESET	(0x00000080)
 
 /*
  *	Define the command values
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index bdf127aaab41d814e2337d2944166a0498bf1a66..d55332de08f91ad8e54e1296867569a8fa109a34 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1037,7 +1037,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
 			info = &aac->hba_map[bus][cid];
 			if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
 			    info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
-				fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
+				fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
 				cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
 			}
 		}
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index a4f28b7e4c65df81ef583eab878a3aa9fc45e0e4..e18877177f1b52d9c43ad3b991b858c80a6cc079 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1576,7 +1576,9 @@ static struct request *_make_request(struct request_queue *q, bool has_write,
 		return req;
 
 	for_each_bio(bio) {
-		ret = blk_rq_append_bio(req, bio);
+		struct bio *bounce_bio = bio;
+
+		ret = blk_rq_append_bio(req, &bounce_bio);
 		if (ret)
 			return ERR_PTR(ret);
 	}
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 449ef5adbb2bc3719b3ba72953ebbecef9e4d086..dfb8da83fa504c979e9ba0639b32a4cae2c8969c 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -374,10 +374,8 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
 			    model, compatible);
 
 	if (strflags)
-		devinfo->flags = simple_strtoul(strflags, NULL, 0);
-	else
-		devinfo->flags = flags;
-
+		flags = (__force blist_flags_t)simple_strtoul(strflags, NULL, 0);
+	devinfo->flags = flags;
 	devinfo->compatible = compatible;
 
 	if (compatible)
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index be5e919db0e8cd9e713727a91bc46923673ea556..0880d975eed3a56c58d27172bfd18c1a59da5d4b 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -770,7 +770,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
  *     SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
  **/
 static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
-		int *bflags, int async)
+		blist_flags_t *bflags, int async)
 {
 	int ret;
 
@@ -1049,14 +1049,15 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
  *   - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
  **/
 static int scsi_probe_and_add_lun(struct scsi_target *starget,
-				  u64 lun, int *bflagsp,
+				  u64 lun, blist_flags_t *bflagsp,
 				  struct scsi_device **sdevp,
 				  enum scsi_scan_mode rescan,
 				  void *hostdata)
 {
 	struct scsi_device *sdev;
 	unsigned char *result;
-	int bflags, res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
+	blist_flags_t bflags;
+	int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 
 	/*
@@ -1201,7 +1202,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
  *     Modifies sdevscan->lun.
  **/
 static void scsi_sequential_lun_scan(struct scsi_target *starget,
-				     int bflags, int scsi_level,
+				     blist_flags_t bflags, int scsi_level,
 				     enum scsi_scan_mode rescan)
 {
 	uint max_dev_lun;
@@ -1292,7 +1293,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
  *     0: scan completed (or no memory, so further scanning is futile)
  *     1: could not scan with REPORT LUN
  **/
-static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
+static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
 				enum scsi_scan_mode rescan)
 {
 	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
@@ -1538,7 +1539,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
 		unsigned int id, u64 lun, enum scsi_scan_mode rescan)
 {
 	struct Scsi_Host *shost = dev_to_shost(parent);
-	int bflags = 0;
+	blist_flags_t bflags = 0;
 	int res;
 	struct scsi_target *starget;
 
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 50e7d7e4a86179b9a47d18569bb759be0b674b88..26ce17178401b645bd9548e49dd11fecb3babf8c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -967,7 +967,8 @@ sdev_show_wwid(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR(wwid, S_IRUGO, sdev_show_wwid, NULL);
 
-#define BLIST_FLAG_NAME(name) [ilog2(BLIST_##name)] = #name
+#define BLIST_FLAG_NAME(name)					\
+	[ilog2((__force unsigned int)BLIST_##name)] = #name
 static const char *const sdev_bflags_name[] = {
 #include "scsi_devinfo_tbl.c"
 };
@@ -984,7 +985,7 @@ sdev_show_blacklist(struct device *dev, struct device_attribute *attr,
 	for (i = 0; i < sizeof(sdev->sdev_bflags) * BITS_PER_BYTE; i++) {
 		const char *name = NULL;
 
-		if (!(sdev->sdev_bflags & BIT(i)))
+		if (!(sdev->sdev_bflags & (__force blist_flags_t)BIT(i)))
 			continue;
 		if (i < ARRAY_SIZE(sdev_bflags_name) && sdev_bflags_name[i])
 			name = sdev_bflags_name[i];
@@ -1414,7 +1415,10 @@ static void __scsi_remove_target(struct scsi_target *starget)
 		 * check.
 		 */
 		if (sdev->channel != starget->channel ||
-		    sdev->id != starget->id ||
+		    sdev->id != starget->id)
+			continue;
+		if (sdev->sdev_state == SDEV_DEL ||
+		    sdev->sdev_state == SDEV_CANCEL ||
 		    !get_device(&sdev->sdev_gendev))
 			continue;
 		spin_unlock_irqrestore(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index d0219e36080c3b79109ac405eb0cd726545585fc..10ebb213ddb33e2920e2fe83e60cc712a50c3002 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -50,14 +50,14 @@
 
 /* Our blacklist flags */
 enum {
-	SPI_BLIST_NOIUS = 0x1,
+	SPI_BLIST_NOIUS = (__force blist_flags_t)0x1,
 };
 
 /* blacklist table, modelled on scsi_devinfo.c */
 static struct {
 	char *vendor;
 	char *model;
-	unsigned flags;
+	blist_flags_t flags;
 } spi_static_device_list[] __initdata = {
 	{"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS },
 	{"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS },
@@ -221,9 +221,11 @@ static int spi_device_configure(struct transport_container *tc,
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
 	struct scsi_target *starget = sdev->sdev_target;
-	unsigned bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
-						      &sdev->inquiry[16],
-						      SCSI_DEVINFO_SPI);
+	blist_flags_t bflags;
+
+	bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8],
+					     &sdev->inquiry[16],
+					     SCSI_DEVINFO_SPI);
 
 	/* Populate the target capability fields with the values
 	 * gleaned from the device inquiry */
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 1b06cf0375dcdbd6f1780ab7535bbaa4c5742916..3b3d1d050cacaa3d83dc615e29691ccc2c5de87f 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -953,10 +953,11 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
 		case TEST_UNIT_READY:
 			break;
 		default:
-			set_host_byte(scmnd, DID_TARGET_FAILURE);
+			set_host_byte(scmnd, DID_ERROR);
 		}
 		break;
 	case SRB_STATUS_INVALID_LUN:
+		set_host_byte(scmnd, DID_NO_CONNECT);
 		do_work = true;
 		process_err_fn = storvsc_remove_lun;
 		break;
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 77fe55ce790c61a8835c4e2338a36be43dafcbac..d65345312527ce450b539964aa0465e1e6787b44 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -79,6 +79,7 @@
 #define A3700_SPI_BYTE_LEN		BIT(5)
 #define A3700_SPI_CLK_PRESCALE		BIT(0)
 #define A3700_SPI_CLK_PRESCALE_MASK	(0x1f)
+#define A3700_SPI_CLK_EVEN_OFFS		(0x10)
 
 #define A3700_SPI_WFIFO_THRS_BIT	28
 #define A3700_SPI_RFIFO_THRS_BIT	24
@@ -220,6 +221,13 @@ static void a3700_spi_clock_set(struct a3700_spi *a3700_spi,
 
 	prescale = DIV_ROUND_UP(clk_get_rate(a3700_spi->clk), speed_hz);
 
+	/* For prescaler values over 15, the prescaler can only be set in
+	 * steps of 2. Starting from A3700_SPI_CLK_EVEN_OFFS, even values
+	 * from 0 up to 30 can be set; we only use the range from 16 to 30.
+	 */
+	if (prescale > 15)
+		prescale = A3700_SPI_CLK_EVEN_OFFS + DIV_ROUND_UP(prescale, 2);
+
 	val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
 	val = val & ~A3700_SPI_CLK_PRESCALE_MASK;
 
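
The prescaler handling added above amounts to: dividers up to 15 are written as-is, while larger dividers are rounded up to the next even value and encoded from offset 0x10. A small worked illustration of that arithmetic (the constant name mirrors the driver; the helper itself is not driver code):

	#include <stdio.h>

	#define CLK_EVEN_OFFS	0x10	/* mirrors A3700_SPI_CLK_EVEN_OFFS */

	/* Encode a requested clock divider into the prescaler register field. */
	static unsigned int encode_prescale(unsigned int divider)
	{
		if (divider > 15)	/* only even dividers are available here */
			return CLK_EVEN_OFFS + (divider + 1) / 2;
		return divider;
	}

	int main(void)
	{
		/* e.g. a divider of 17 rounds up to 18 and encodes as 0x10 + 9 */
		printf("0x%x 0x%x\n", encode_prescale(12), encode_prescale(17));
		return 0;
	}
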
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index f95da364c2832b0142158e12c97648aab81b7165..66947097102370d0f54ba2559abddab1ac812a5d 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1661,12 +1661,12 @@ static int atmel_spi_remove(struct platform_device *pdev)
 	pm_runtime_get_sync(&pdev->dev);
 
 	/* reset the hardware and block queue progress */
-	spin_lock_irq(&as->lock);
 	if (as->use_dma) {
 		atmel_spi_stop_dma(master);
 		atmel_spi_release_dma(master);
 	}
 
+	spin_lock_irq(&as->lock);
 	spi_writel(as, CR, SPI_BIT(SWRST));
 	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
 	spi_readl(as, SR);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 2ce875764ca646a2bdfb803cae33465ab8fa1786..0835a8d88fb8f85ab5ae44a4aa74d94121d19d87 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -377,8 +377,8 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
 	/* Sets SPCMD */
 	rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
 
-	/* Enables SPI function in master mode */
-	rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);
+	/* Sets RSPI mode */
+	rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
 
 	return 0;
 }
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index c5cd635c28f388bec2cfd47b9a6c6c9dcec9e046..41410031f8e99e6a1d54b8f94990df0133356ced 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -525,7 +525,7 @@ err_free_master:
 
 static int sun4i_spi_remove(struct platform_device *pdev)
 {
-	pm_runtime_disable(&pdev->dev);
+	pm_runtime_force_suspend(&pdev->dev);
 
 	return 0;
 }
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index bc7100b93dfcf0c24213f479f9a5fffc41666315..e0b9fe1d0e37d98a7243ca35a56b1d62e024e8b3 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -271,6 +271,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
 	while (remaining_words) {
 		int n_words, tx_words, rx_words;
 		u32 sr;
+		int stalled;
 
 		n_words = min(remaining_words, xspi->buffer_size);
 
@@ -299,7 +300,17 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
 
 		/* Read out all the data from the Rx FIFO */
 		rx_words = n_words;
+		stalled = 10;
 		while (rx_words) {
+			if (rx_words == n_words && !(stalled--) &&
+			    !(sr & XSPI_SR_TX_EMPTY_MASK) &&
+			    (sr & XSPI_SR_RX_EMPTY_MASK)) {
+				dev_err(&spi->dev,
+					"Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n");
+				xspi_init_hw(xspi);
+				return -EIO;
+			}
+
 			if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
 				xilinx_spi_rx(xspi);
 				rx_words--;
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index a517b2d29f1bb6efce2f22a5520a009c1375bcfb..8f6494158d3d018b847ad989f6fa1c2648848094 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -37,7 +37,7 @@ config ION_CHUNK_HEAP
 
 config ION_CMA_HEAP
 	bool "Ion CMA heap support"
-	depends on ION && CMA
+	depends on ION && DMA_CMA
 	help
 	  Choose this option to enable CMA heaps with Ion. This heap is backed
 	  by the Contiguous Memory Allocator (CMA). If your system has these
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index a7d9b0e9857225abf7a6c88885d15e76afc6627b..f480885e346b69cb6ec881abefca0ee3aaf9fd15 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -346,7 +346,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	mutex_lock(&buffer->lock);
 	list_for_each_entry(a, &buffer->attachments, list) {
 		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
-				    DMA_BIDIRECTIONAL);
+				    direction);
 	}
 	mutex_unlock(&buffer->lock);
 
@@ -368,7 +368,7 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 	mutex_lock(&buffer->lock);
 	list_for_each_entry(a, &buffer->attachments, list) {
 		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
-				       DMA_BIDIRECTIONAL);
+				       direction);
 	}
 	mutex_unlock(&buffer->lock);
 
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index dd5545d9990a0f61ac2a1ba6249ec851602d1b3b..86196ffd2faf9a7733cd931fa48e24e5ace3fd22 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -39,9 +39,15 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
 	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
 	struct sg_table *table;
 	struct page *pages;
+	unsigned long size = PAGE_ALIGN(len);
+	unsigned long nr_pages = size >> PAGE_SHIFT;
+	unsigned long align = get_order(size);
 	int ret;
 
-	pages = cma_alloc(cma_heap->cma, len, 0, GFP_KERNEL);
+	if (align > CONFIG_CMA_ALIGNMENT)
+		align = CONFIG_CMA_ALIGNMENT;
+
+	pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
 	if (!pages)
 		return -ENOMEM;
 
@@ -53,7 +59,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
 	if (ret)
 		goto free_mem;
 
-	sg_set_page(table->sgl, pages, len, 0);
+	sg_set_page(table->sgl, pages, size, 0);
 
 	buffer->priv_virt = pages;
 	buffer->sg_table = table;
@@ -62,7 +68,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
 free_mem:
 	kfree(table);
 err:
-	cma_release(cma_heap->cma, pages, buffer->size);
+	cma_release(cma_heap->cma, pages, nr_pages);
 	return -ENOMEM;
 }
 
@@ -70,9 +76,10 @@ static void ion_cma_free(struct ion_buffer *buffer)
 {
 	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
 	struct page *pages = buffer->priv_virt;
+	unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
 
 	/* release memory */
-	cma_release(cma_heap->cma, pages, buffer->size);
+	cma_release(cma_heap->cma, pages, nr_pages);
 	/* release sg table */
 	sg_free_table(buffer->sg_table);
 	kfree(buffer->sg_table);
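
The ion_cma_heap fix above hinges on cma_alloc() and cma_release() taking a page count and an allocation order rather than a byte length. A minimal sketch of the length-to-pages-and-order conversion (PAGE_SIZE and the alignment cap are assumed example values here, not taken from a real kernel configuration):

	#include <stdio.h>

	#define PAGE_SHIFT	12			/* assume 4 KiB pages */
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define CMA_ALIGN_MAX	8			/* stand-in for CONFIG_CMA_ALIGNMENT */

	/* Smallest order such that (1 << order) pages cover 'pages' pages. */
	static unsigned int order_for(unsigned long pages)
	{
		unsigned int order = 0;

		while ((1UL << order) < pages)
			order++;
		return order;
	}

	int main(void)
	{
		unsigned long len = 3 * PAGE_SIZE + 100;	/* arbitrary request */
		unsigned long size = (len + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
		unsigned long nr_pages = size >> PAGE_SHIFT;
		unsigned int align = order_for(nr_pages);

		if (align > CMA_ALIGN_MAX)
			align = CMA_ALIGN_MAX;

		/* the allocator is then given nr_pages and align, not bytes */
		printf("size=%lu nr_pages=%lu align=%u\n", size, nr_pages, align);
		return 0;
	}
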
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 986c2a40d9780ecbbc2226d9be6fa74876e85681..8267119ccc8e73108bc8e4f2edb777861192f353 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -487,21 +487,18 @@ ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
 			      ksocknal_nid2peerlist(id.nid));
 	}
 
-	route2 = NULL;
 	list_for_each_entry(route2, &peer->ksnp_routes, ksnr_list) {
-		if (route2->ksnr_ipaddr == ipaddr)
-			break;
-
-		route2 = NULL;
-	}
-	if (!route2) {
-		ksocknal_add_route_locked(peer, route);
-		route->ksnr_share_count++;
-	} else {
-		ksocknal_route_decref(route);
-		route2->ksnr_share_count++;
+		if (route2->ksnr_ipaddr == ipaddr) {
+			/* Route already exists, use the old one */
+			ksocknal_route_decref(route);
+			route2->ksnr_share_count++;
+			goto out;
+		}
 	}
-
+	/* Route doesn't already exist, add the new one */
+	ksocknal_add_route_locked(peer, route);
+	route->ksnr_share_count++;
+out:
 	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
 	return 0;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 7c69b4a9694d2016a8aac3b63a4b7d4399146688..0d99b242e82e3f84da25a47564f96db60be4b5f5 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -920,7 +920,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 					" %d i: %d bio: %p, allocating another"
 					" bio\n", bio->bi_vcnt, i, bio);
 
-				rc = blk_rq_append_bio(req, bio);
+				rc = blk_rq_append_bio(req, &bio);
 				if (rc) {
 					pr_err("pSCSI: failed to append bio\n");
 					goto fail;
@@ -938,7 +938,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 	}
 
 	if (bio) {
-		rc = blk_rq_append_bio(req, bio);
+		rc = blk_rq_append_bio(req, &bio);
 		if (rc) {
 			pr_err("pSCSI: failed to append bio\n");
 			goto fail;
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 419a7a90bce0e21c40afc165506420aa9146fb72..f45bcbc63738ffb3598e958e1af529c443b98e1d 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -339,7 +339,7 @@ static void __ring_interrupt(struct tb_ring *ring)
 		return;
 
 	if (ring->start_poll) {
-		__ring_interrupt_mask(ring, false);
+		__ring_interrupt_mask(ring, true);
 		ring->start_poll(ring->poll_data);
 	} else {
 		schedule_work(&ring->work);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 427e0d5d8f135e56249c120fdfb50aca1f65021d..539b49adb6afd41190eee97f6eaf1950c8a1ac3f 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1762,7 +1762,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
 {
 	struct n_tty_data *ldata = tty->disc_data;
 
-	if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) {
+	if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) {
 		bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
 		ldata->line_start = ldata->read_tail;
 		if (!L_ICANON(tty) || !read_cnt(ldata)) {
@@ -2425,7 +2425,7 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
 		return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
 	case TIOCINQ:
 		down_write(&tty->termios_rwsem);
-		if (L_ICANON(tty))
+		if (L_ICANON(tty) && !L_EXTPROC(tty))
 			retval = inq_canon(ldata);
 		else
 			retval = read_cnt(ldata);
diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
index 3593ce0ec641d848f1a15438ffb76295558ab9a6..880009987460affefa3aac2bf61aee6174195f81 100644
--- a/drivers/usb/chipidea/ci_hdrc_msm.c
+++ b/drivers/usb/chipidea/ci_hdrc_msm.c
@@ -247,7 +247,7 @@ static int ci_hdrc_msm_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_mux;
 
-	ulpi_node = of_find_node_by_name(of_node_get(pdev->dev.of_node), "ulpi");
+	ulpi_node = of_get_child_by_name(pdev->dev.of_node, "ulpi");
 	if (ulpi_node) {
 		phy_node = of_get_next_available_child(ulpi_node, NULL);
 		ci->hsic = of_device_is_compatible(phy_node, "qcom,usb-hsic-phy");
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 78e92d29f8d98777c1294292808b1a868dcfcb7f..c821b4b9647e357468335fca83b3aa980e600315 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -1007,7 +1007,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
 		case USB_SSP_CAP_TYPE:
 			ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
 			ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
-				USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1;
+				USB_SSP_SUBLINK_SPEED_ATTRIBS);
 			if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
 				dev->bos->ssp_cap = ssp_cap;
 			break;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index a10b346b9777dba58abe8346cb4926e42d8bb7aa..4024926c1d68c93e97e40f822c4a690a4c113987 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -52,10 +52,11 @@ static const struct usb_device_id usb_quirk_list[] = {
 	/* Microsoft LifeCam-VX700 v2.0 */
 	{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
 
-	/* Logitech HD Pro Webcams C920, C920-C and C930e */
+	/* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
 	{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
 	{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
 	{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
+	{ USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
 
 	/* Logitech ConferenceCam CC3000e */
 	{ USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -149,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = {
 	/* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
 	{ USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
 
+	/* ELSA MicroLink 56K */
+	{ USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_RESET_RESUME },
+
 	/* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
 	{ USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
 
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index 4f7895dbcf880ef2a1e3b863b7002825e3cb752c..e26e685d8a578fddffaa7ff220a0c4442ba4f5ce 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -162,7 +162,7 @@ static void xhci_debugfs_extcap_regset(struct xhci_hcd *xhci, int cap_id,
 static int xhci_ring_enqueue_show(struct seq_file *s, void *unused)
 {
 	dma_addr_t		dma;
-	struct xhci_ring	*ring = s->private;
+	struct xhci_ring	*ring = *(struct xhci_ring **)s->private;
 
 	dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
 	seq_printf(s, "%pad\n", &dma);
@@ -173,7 +173,7 @@ static int xhci_ring_enqueue_show(struct seq_file *s, void *unused)
 static int xhci_ring_dequeue_show(struct seq_file *s, void *unused)
 {
 	dma_addr_t		dma;
-	struct xhci_ring	*ring = s->private;
+	struct xhci_ring	*ring = *(struct xhci_ring **)s->private;
 
 	dma = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
 	seq_printf(s, "%pad\n", &dma);
@@ -183,7 +183,7 @@ static int xhci_ring_dequeue_show(struct seq_file *s, void *unused)
 
 static int xhci_ring_cycle_show(struct seq_file *s, void *unused)
 {
-	struct xhci_ring	*ring = s->private;
+	struct xhci_ring	*ring = *(struct xhci_ring **)s->private;
 
 	seq_printf(s, "%d\n", ring->cycle_state);
 
@@ -346,7 +346,7 @@ static void xhci_debugfs_create_files(struct xhci_hcd *xhci,
 }
 
 static struct dentry *xhci_debugfs_create_ring_dir(struct xhci_hcd *xhci,
-						   struct xhci_ring *ring,
+						   struct xhci_ring **ring,
 						   const char *name,
 						   struct dentry *parent)
 {
@@ -387,7 +387,7 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
 
 	snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index);
 	epriv->root = xhci_debugfs_create_ring_dir(xhci,
-						   dev->eps[ep_index].new_ring,
+						   &dev->eps[ep_index].new_ring,
 						   epriv->name,
 						   spriv->root);
 	spriv->eps[ep_index] = epriv;
@@ -423,7 +423,7 @@ void xhci_debugfs_create_slot(struct xhci_hcd *xhci, int slot_id)
 	priv->dev = dev;
 	dev->debugfs_private = priv;
 
-	xhci_debugfs_create_ring_dir(xhci, dev->eps[0].ring,
+	xhci_debugfs_create_ring_dir(xhci, &dev->eps[0].ring,
 				     "ep00", priv->root);
 
 	xhci_debugfs_create_context_files(xhci, priv->root, slot_id);
@@ -488,11 +488,11 @@ void xhci_debugfs_init(struct xhci_hcd *xhci)
 				   ARRAY_SIZE(xhci_extcap_dbc),
 				   "reg-ext-dbc");
 
-	xhci_debugfs_create_ring_dir(xhci, xhci->cmd_ring,
+	xhci_debugfs_create_ring_dir(xhci, &xhci->cmd_ring,
 				     "command-ring",
 				     xhci->debugfs_root);
 
-	xhci_debugfs_create_ring_dir(xhci, xhci->event_ring,
+	xhci_debugfs_create_ring_dir(xhci, &xhci->event_ring,
 				     "event-ring",
 				     xhci->debugfs_root);
 
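
The xhci-debugfs change above stores a pointer to the ring pointer in the seq_file private data, so a debugfs file still reads the current ring after the ring has been reallocated. A small user-space illustration of that double-indirection pattern (not xHCI code):

	#include <stdio.h>
	#include <stdlib.h>

	struct ring { int cycle_state; };

	/* A consumer that keeps a pointer to the *holder* of the object, so it
	 * still sees the right object after the holder is repointed. */
	static void show_cycle(struct ring **holder)
	{
		printf("cycle_state = %d\n", (*holder)->cycle_state);
	}

	int main(void)
	{
		struct ring *ring = malloc(sizeof(*ring));
		struct ring **holder = &ring;

		ring->cycle_state = 1;
		show_cycle(holder);

		/* the ring is reallocated; any saved copy of the old pointer is
		 * stale, but the holder-based consumer keeps working */
		free(ring);
		ring = malloc(sizeof(*ring));
		ring->cycle_state = 2;
		show_cycle(holder);

		free(ring);
		return 0;
	}
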
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 7ef1274ef7f7f245a03e539982da07f6242343b1..1aad89b8aba0b5b5b4610aa161bc7379ba45fbb7 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -177,6 +177,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 		xhci->quirks |= XHCI_BROKEN_STREAMS;
 	}
+	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+			pdev->device == 0x0014)
+		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
 			pdev->device == 0x0015)
 		xhci->quirks |= XHCI_RESET_ON_RESUME;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2424d3020ca364b22792376e36c21462af3b2f62..da6dbe3ebd8be9c8f137c6f500663be857184080 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3525,8 +3525,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	struct xhci_slot_ctx *slot_ctx;
 	int i, ret;
 
-	xhci_debugfs_remove_slot(xhci, udev->slot_id);
-
 #ifndef CONFIG_USB_DEFAULT_PERSIST
 	/*
 	 * We called pm_runtime_get_noresume when the device was attached.
@@ -3555,8 +3553,10 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	}
 
 	ret = xhci_disable_slot(xhci, udev->slot_id);
-	if (ret)
+	if (ret) {
+		xhci_debugfs_remove_slot(xhci, udev->slot_id);
 		xhci_free_virt_device(xhci, udev->slot_id);
+	}
 }
 
 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1aba9105b369678759a204780bb7b3d882b7d879..fc68952c994a5557bb8ca3e2de94f9d2c1b15791 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1013,6 +1013,7 @@ static const struct usb_device_id id_table_combined[] = {
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
 	{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
+	{ USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
 	{ }					/* Terminating entry */
 };
 
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4faa09fe308ca0570cc9dcda6900ef74efa3e1d0..8b4ecd2bd297b84d5a86c9b02cd5c38d3a6e86a8 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -914,6 +914,12 @@
 #define ICPDAS_I7561U_PID		0x0104
 #define ICPDAS_I7563U_PID		0x0105
 
+/*
+ * Airbus Defence and Space
+ */
+#define AIRBUS_DS_VID			0x1e8e  /* Vendor ID */
+#define AIRBUS_DS_P8GR			0x6001  /* Tetra P8GR */
+
 /*
  * RT Systems programming cables for various ham radios
  */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 3b3513874cfd1e75a5380ee208f02c1144919cd1..b6320e3be42970c984cb86505251cde089437102 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -233,6 +233,8 @@ static void option_instat_callback(struct urb *urb);
 /* These Quectel products use Qualcomm's vendor ID */
 #define QUECTEL_PRODUCT_UC20			0x9003
 #define QUECTEL_PRODUCT_UC15			0x9090
+/* These Yuga products use Qualcomm's vendor ID */
+#define YUGA_PRODUCT_CLM920_NC5			0x9625
 
 #define QUECTEL_VENDOR_ID			0x2c7c
 /* These Quectel products use Quectel's vendor ID */
@@ -280,6 +282,7 @@ static void option_instat_callback(struct urb *urb);
 #define TELIT_PRODUCT_LE922_USBCFG3		0x1043
 #define TELIT_PRODUCT_LE922_USBCFG5		0x1045
 #define TELIT_PRODUCT_ME910			0x1100
+#define TELIT_PRODUCT_ME910_DUAL_MODEM		0x1101
 #define TELIT_PRODUCT_LE920			0x1200
 #define TELIT_PRODUCT_LE910			0x1201
 #define TELIT_PRODUCT_LE910_USBCFG4		0x1206
@@ -645,6 +648,11 @@ static const struct option_blacklist_info telit_me910_blacklist = {
 	.reserved = BIT(1) | BIT(3),
 };
 
+static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
+	.sendsetup = BIT(0),
+	.reserved = BIT(3),
+};
+
 static const struct option_blacklist_info telit_le910_blacklist = {
 	.sendsetup = BIT(0),
 	.reserved = BIT(1) | BIT(2),
@@ -674,6 +682,10 @@ static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
 	.reserved = BIT(4) | BIT(5),
 };
 
+static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
+	.reserved = BIT(1) | BIT(4),
+};
+
 static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1178,6 +1190,9 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	/* Yuga products use Qualcomm vendor ID */
+	{ USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
+	  .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist },
 	/* Quectel products using Quectel vendor ID */
 	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
@@ -1244,6 +1259,8 @@ static const struct usb_device_id option_ids[] = {
 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
 		.driver_info = (kernel_ulong_t)&telit_me910_blacklist },
+	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+		.driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
 		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index e3892541a489940f58bfa8f534ff0e765fc527f4..613f91add03da189c0fd5334cbccbbf720060079 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
 	{DEVICE_SWI(0x1199, 0x9079)},	/* Sierra Wireless EM74xx */
 	{DEVICE_SWI(0x1199, 0x907a)},	/* Sierra Wireless EM74xx QDL */
 	{DEVICE_SWI(0x1199, 0x907b)},	/* Sierra Wireless EM74xx */
+	{DEVICE_SWI(0x1199, 0x9090)},	/* Sierra Wireless EM7565 QDL */
+	{DEVICE_SWI(0x1199, 0x9091)},	/* Sierra Wireless EM7565 */
 	{DEVICE_SWI(0x413c, 0x81a2)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a3)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
 	{DEVICE_SWI(0x413c, 0x81a4)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
@@ -342,6 +344,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
 			break;
 		case 2:
 			dev_dbg(dev, "NMEA GPS interface found\n");
+			sendsetup = true;
 			break;
 		case 3:
 			dev_dbg(dev, "Modem port found\n");
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index a3df8ee82faff77d9c114ed605412ccfb0704ce6..e31a6f204397dba47b3356ec3bbfe420539dc05b 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -149,8 +149,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
 	 * step 1?
 	 */
 	if (ud->tcp_socket) {
-		dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n",
-			ud->tcp_socket);
+		dev_dbg(&sdev->udev->dev, "shutdown sockfd %d\n", ud->sockfd);
 		kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
 	}
 
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 4f48b306713f1a62942905122c9164beb134ce5f..c31c8402a0c55ddd2f4b463ebabd28be6438f490 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -237,11 +237,12 @@ void stub_device_cleanup_urbs(struct stub_device *sdev)
 	struct stub_priv *priv;
 	struct urb *urb;
 
-	dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev);
+	dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n");
 
 	while ((priv = stub_priv_pop(sdev))) {
 		urb = priv->urb;
-		dev_dbg(&sdev->udev->dev, "free urb %p\n", urb);
+		dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n",
+			priv->seqnum);
 		usb_kill_urb(urb);
 
 		kmem_cache_free(stub_priv_cache, priv);
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 493ac2928391accc4984e3ceddbfdadd2690a26c..6c5a593139996fe11be9c2bf86eb8cc19a390ef1 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -211,9 +211,6 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
 		if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
 			continue;
 
-		dev_info(&priv->urb->dev->dev, "unlink urb %p\n",
-			 priv->urb);
-
 		/*
 		 * This matched urb is not completed yet (i.e., be in
 		 * flight in usb hcd hardware/driver). Now we are
@@ -252,8 +249,8 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
 		ret = usb_unlink_urb(priv->urb);
 		if (ret != -EINPROGRESS)
 			dev_err(&priv->urb->dev->dev,
-				"failed to unlink a urb %p, ret %d\n",
-				priv->urb, ret);
+				"failed to unlink a urb # %lu, ret %d\n",
+				priv->seqnum, ret);
 
 		return 0;
 	}
@@ -342,14 +339,6 @@ static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
 
 	epd = &ep->desc;
 
-	/* validate transfer_buffer_length */
-	if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) {
-		dev_err(&sdev->udev->dev,
-			"CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n",
-			pdu->u.cmd_submit.transfer_buffer_length);
-		return -1;
-	}
-
 	if (usb_endpoint_xfer_control(epd)) {
 		if (dir == USBIP_DIR_OUT)
 			return usb_sndctrlpipe(udev, epnum);
@@ -482,8 +471,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
 	}
 
 	/* allocate urb transfer buffer, if needed */
-	if (pdu->u.cmd_submit.transfer_buffer_length > 0 &&
-	    pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) {
+	if (pdu->u.cmd_submit.transfer_buffer_length > 0) {
 		priv->urb->transfer_buffer =
 			kzalloc(pdu->u.cmd_submit.transfer_buffer_length,
 				GFP_KERNEL);
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index 53172b1f6257cf9f8d72dac57212c0ec939a8dc4..f0ec41a50cbc16f9814ca8b2a7590dbe3c465fab 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -88,7 +88,7 @@ void stub_complete(struct urb *urb)
 	/* link a urb to the queue of tx. */
 	spin_lock_irqsave(&sdev->priv_lock, flags);
 	if (sdev->ud.tcp_socket == NULL) {
-		usbip_dbg_stub_tx("ignore urb for closed connection %p", urb);
+		usbip_dbg_stub_tx("ignore urb for closed connection\n");
 		/* It will be freed in stub_device_cleanup_urbs(). */
 	} else if (priv->unlinking) {
 		stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
@@ -190,8 +190,8 @@ static int stub_send_ret_submit(struct stub_device *sdev)
 
 		/* 1. setup usbip_header */
 		setup_ret_submit_pdu(&pdu_header, urb);
-		usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
-				  pdu_header.base.seqnum, urb);
+		usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
+				  pdu_header.base.seqnum);
 		usbip_header_correct_endian(&pdu_header, 1);
 
 		iov[iovnum].iov_base = &pdu_header;
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index f7978933b40290787ded82fd7b244783610d206e..7b219d9109b41282537363caa976e53c9b675332 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -317,26 +317,20 @@ int usbip_recv(struct socket *sock, void *buf, int size)
 	struct msghdr msg = {.msg_flags = MSG_NOSIGNAL};
 	int total = 0;
 
+	if (!sock || !buf || !size)
+		return -EINVAL;
+
 	iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, &iov, 1, size);
 
 	usbip_dbg_xmit("enter\n");
 
-	if (!sock || !buf || !size) {
-		pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf,
-		       size);
-		return -EINVAL;
-	}
-
 	do {
-		int sz = msg_data_left(&msg);
+		msg_data_left(&msg);
 		sock->sk->sk_allocation = GFP_NOIO;
 
 		result = sock_recvmsg(sock, &msg, MSG_WAITALL);
-		if (result <= 0) {
-			pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
-				 sock, buf + total, sz, result, total);
+		if (result <= 0)
 			goto err;
-		}
 
 		total += result;
 	} while (msg_data_left(&msg));
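
The reworked usbip_recv() validates its arguments before the iov_iter is set up, drops the messages that printed raw pointer values, and simply loops on sock_recvmsg() with MSG_WAITALL until msg_data_left() reports nothing outstanding. A rough userspace analogue of that "read exactly size bytes or fail" loop, using plain POSIX recv() rather than the kernel socket API (compile-only sketch):

#include <sys/types.h>
#include <sys/socket.h>
#include <stddef.h>

/* Read exactly size bytes from fd; return size on success, -1 otherwise. */
static ssize_t recv_exact(int fd, void *buf, size_t size)
{
	size_t total = 0;

	if (fd < 0 || !buf || !size)
		return -1;		/* validate arguments up front */

	while (total < size) {
		ssize_t n = recv(fd, (char *)buf + total, size - total,
				 MSG_WAITALL);

		if (n <= 0)
			return -1;	/* error or peer closed the socket */
		total += n;
	}
	return total;
}
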
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 6b3278c4b72a0d745a724b3ef93cce0dc3dccd7d..c3e1008aa491ee45071adab6215f7440aa6dfa13 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -656,9 +656,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
 	struct vhci_device *vdev;
 	unsigned long flags;
 
-	usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
-			  hcd, urb, mem_flags);
-
 	if (portnum > VHCI_HC_PORTS) {
 		pr_err("invalid port number %d\n", portnum);
 		return -ENODEV;
@@ -822,8 +819,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 	struct vhci_device *vdev;
 	unsigned long flags;
 
-	pr_info("dequeue a urb %p\n", urb);
-
 	spin_lock_irqsave(&vhci->lock, flags);
 
 	priv = urb->hcpriv;
@@ -851,7 +846,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		/* tcp connection is closed */
 		spin_lock(&vdev->priv_lock);
 
-		pr_info("device %p seems to be disconnected\n", vdev);
 		list_del(&priv->list);
 		kfree(priv);
 		urb->hcpriv = NULL;
@@ -863,8 +857,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		 * vhci_rx will receive RET_UNLINK and give back the URB.
 		 * Otherwise, we give back it here.
 		 */
-		pr_info("gives back urb %p\n", urb);
-
 		usb_hcd_unlink_urb_from_ep(hcd, urb);
 
 		spin_unlock_irqrestore(&vhci->lock, flags);
@@ -892,8 +884,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 
 		unlink->unlink_seqnum = priv->seqnum;
 
-		pr_info("device %p seems to be still connected\n", vdev);
-
 		/* send cmd_unlink and try to cancel the pending URB in the
 		 * peer */
 		list_add_tail(&unlink->list, &vdev->unlink_tx);
@@ -975,7 +965,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
 
 	/* need this? see stub_dev.c */
 	if (ud->tcp_socket) {
-		pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket);
+		pr_debug("shutdown tcp_socket %d\n", ud->sockfd);
 		kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
 	}
 
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index 90577e8b2282393014558da959125345b1f7008c..112ebb90d8c95e0eb8ba3aa800d3b66941cb7e6c 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -23,24 +23,23 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum)
 		urb = priv->urb;
 		status = urb->status;
 
-		usbip_dbg_vhci_rx("find urb %p vurb %p seqnum %u\n",
-				urb, priv, seqnum);
+		usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum);
 
 		switch (status) {
 		case -ENOENT:
 			/* fall through */
 		case -ECONNRESET:
-			dev_info(&urb->dev->dev,
-				 "urb %p was unlinked %ssynchronuously.\n", urb,
-				 status == -ENOENT ? "" : "a");
+			dev_dbg(&urb->dev->dev,
+				 "urb seq# %u was unlinked %ssynchronously\n",
+				 seqnum, status == -ENOENT ? "" : "a");
 			break;
 		case -EINPROGRESS:
 			/* no info output */
 			break;
 		default:
-			dev_info(&urb->dev->dev,
-				 "urb %p may be in a error, status %d\n", urb,
-				 status);
+			dev_dbg(&urb->dev->dev,
+				 "urb seq# %u may be in an error state, status %d\n",
+				 seqnum, status);
 		}
 
 		list_del(&priv->list);
@@ -67,8 +66,8 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 	spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
 	if (!urb) {
-		pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
-		pr_info("max seqnum %d\n",
+		pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
+			pdu->base.seqnum,
 			atomic_read(&vhci_hcd->seqnum));
 		usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
 		return;
@@ -91,7 +90,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 	if (usbip_dbg_flag_vhci_rx)
 		usbip_dump_urb(urb);
 
-	usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
+	usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
 
 	spin_lock_irqsave(&vhci->lock, flags);
 	usb_hcd_unlink_urb_from_ep(vhci_hcd_to_hcd(vhci_hcd), urb);
@@ -158,7 +157,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 		pr_info("the urb (seqnum %d) was already given back\n",
 			pdu->base.seqnum);
 	} else {
-		usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
+		usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum);
 
 		/* If unlink is successful, status is -ECONNRESET */
 		urb->status = pdu->u.ret_unlink.status;
diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c
index d625a2ff4b712f4112bd48b8115ea0104e005d15..9aed15a358b7b98b0fab9e6b02b6820cff9ff808 100644
--- a/drivers/usb/usbip/vhci_tx.c
+++ b/drivers/usb/usbip/vhci_tx.c
@@ -69,7 +69,8 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
 		memset(&msg, 0, sizeof(msg));
 		memset(&iov, 0, sizeof(iov));
 
-		usbip_dbg_vhci_tx("setup txdata urb %p\n", urb);
+		usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n",
+				  priv->seqnum);
 
 		/* 1. setup usbip_header */
 		setup_cmd_submit_pdu(&pdu_header, urb);
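
A theme running through the usbip hunks above (stub_dev, stub_main, stub_rx, stub_tx, vhci_hcd, vhci_rx, vhci_tx) is that diagnostics no longer print raw urb or socket pointers with "%p"; they identify objects by URB sequence number or socket fd instead, which is both more useful to a log reader and avoids exposing kernel addresses. A trivial standalone illustration of the pattern (generic C, not the kernel's dev_dbg machinery):

#include <stdio.h>

struct fake_urb {
	unsigned long seqnum;	/* stable, meaningful identifier */
	void *buffer;		/* pointer value is not worth logging */
};

static void log_giveback(const struct fake_urb *urb)
{
	/* Prefer "seqnum %lu" over "urb %p": the number means something to
	 * the reader and does not reveal an address. */
	printf("now giveback urb seqnum %lu\n", urb->seqnum);
}

int main(void)
{
	struct fake_urb u = { .seqnum = 42, .buffer = NULL };

	log_giveback(&u);
	return 0;
}
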
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index f77e499afdddb02c2d7595459a70b15f3a8c56d5..065f0b607373402a0d4dd7520b9820503390fb0a 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -257,10 +257,25 @@ static void release_memory_resource(struct resource *resource)
 	kfree(resource);
 }
 
+/*
+ * Host memory not allocated to dom0. We can use this range for hotplug-based
+ * ballooning.
+ *
+ * It's a type-less resource. Setting IORESOURCE_MEM will make resource
+ * management algorithms (arch_remove_reservations()) look into guest e820,
+ * which we don't want.
+ */
+static struct resource hostmem_resource = {
+	.name   = "Host RAM",
+};
+
+void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res)
+{}
+
 static struct resource *additional_memory_resource(phys_addr_t size)
 {
-	struct resource *res;
-	int ret;
+	struct resource *res, *res_hostmem;
+	int ret = -ENOMEM;
 
 	res = kzalloc(sizeof(*res), GFP_KERNEL);
 	if (!res)
@@ -269,13 +284,42 @@ static struct resource *additional_memory_resource(phys_addr_t size)
 	res->name = "System RAM";
 	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
-	ret = allocate_resource(&iomem_resource, res,
-				size, 0, -1,
-				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
-	if (ret < 0) {
-		pr_err("Cannot allocate new System RAM resource\n");
-		kfree(res);
-		return NULL;
+	res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL);
+	if (res_hostmem) {
+		/* Try to grab a range from hostmem */
+		res_hostmem->name = "Host memory";
+		ret = allocate_resource(&hostmem_resource, res_hostmem,
+					size, 0, -1,
+					PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+	}
+
+	if (!ret) {
+		/*
+		 * Insert this resource into iomem. Because hostmem_resource
+		 * tracks the portion of the guest e820 marked as UNUSABLE,
+		 * no one else should try to use it.
+		 */
+		res->start = res_hostmem->start;
+		res->end = res_hostmem->end;
+		ret = insert_resource(&iomem_resource, res);
+		if (ret < 0) {
+			pr_err("Can't insert iomem_resource [%llx - %llx]\n",
+				res->start, res->end);
+			release_memory_resource(res_hostmem);
+			res_hostmem = NULL;
+			res->start = res->end = 0;
+		}
+	}
+
+	if (ret) {
+		ret = allocate_resource(&iomem_resource, res,
+					size, 0, -1,
+					PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+		if (ret < 0) {
+			pr_err("Cannot allocate new System RAM resource\n");
+			kfree(res);
+			return NULL;
+		}
 	}
 
 #ifdef CONFIG_SPARSEMEM
@@ -287,6 +331,7 @@ static struct resource *additional_memory_resource(phys_addr_t size)
 			pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
 			       pfn, limit);
 			release_memory_resource(res);
+			release_memory_resource(res_hostmem);
 			return NULL;
 		}
 	}
@@ -765,6 +810,8 @@ static int __init balloon_init(void)
 	set_online_page_callback(&xen_online_page);
 	register_memory_notifier(&xen_memory_nb);
 	register_sysctl_table(xen_root);
+
+	arch_xen_balloon_init(&hostmem_resource);
 #endif
 
 #ifdef CONFIG_XEN_PV
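
arch_xen_balloon_init() is defined here with __attribute__((weak)) and an empty body, so architectures that have nothing to contribute need no code at all, while an architecture file that provides a non-weak definition of the same symbol overrides the stub at link time and can populate hostmem_resource before ballooning starts. A minimal standalone demonstration of the weak-default-hook idiom (GCC/Clang on ELF; the names below are made up):

#include <stdio.h>

struct balloon_res { const char *name; };

/* Default (weak) hook: an object file that defines a non-weak
 * my_arch_hook() would silently replace this at link time. */
void __attribute__((weak)) my_arch_hook(struct balloon_res *res)
{
	printf("weak default hook, nothing to add to '%s'\n", res->name);
}

int main(void)
{
	struct balloon_res hostmem = { .name = "Host RAM" };

	my_arch_hook(&hostmem);		/* runs the weak default here */
	return 0;
}
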
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 0da80019a9173cba18811abc71e39ca4631a7449..83ed7715f856d2025509c308583436e63d0043a5 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -702,7 +702,7 @@ xfs_alloc_ag_vextent(
 	ASSERT(args->agbno % args->alignment == 0);
 
 	/* if not file data, insert new block into the reverse map btree */
-	if (args->oinfo.oi_owner != XFS_RMAP_OWN_UNKNOWN) {
+	if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
 		error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
 				       args->agbno, args->len, &args->oinfo);
 		if (error)
@@ -1682,7 +1682,7 @@ xfs_free_ag_extent(
 	bno_cur = cnt_cur = NULL;
 	mp = tp->t_mountp;
 
-	if (oinfo->oi_owner != XFS_RMAP_OWN_UNKNOWN) {
+	if (!xfs_rmap_should_skip_owner_update(oinfo)) {
 		error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
 		if (error)
 			goto error0;
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 6249c92671debe20a45e3cb0577360552e36d523..a76914db72ef11094cd8d74e5bd6cd6add2a3fde 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -212,6 +212,7 @@ xfs_attr_set(
 	int			flags)
 {
 	struct xfs_mount	*mp = dp->i_mount;
+	struct xfs_buf		*leaf_bp = NULL;
 	struct xfs_da_args	args;
 	struct xfs_defer_ops	dfops;
 	struct xfs_trans_res	tres;
@@ -327,9 +328,16 @@ xfs_attr_set(
 		 * GROT: another possible req'mt for a double-split btree op.
 		 */
 		xfs_defer_init(args.dfops, args.firstblock);
-		error = xfs_attr_shortform_to_leaf(&args);
+		error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);
 		if (error)
 			goto out_defer_cancel;
+		/*
+		 * Prevent the leaf buffer from being unlocked so that a
+		 * concurrent AIL push cannot grab the half-baked leaf
+		 * buffer and run into problems with the write verifier.
+		 */
+		xfs_trans_bhold(args.trans, leaf_bp);
+		xfs_defer_bjoin(args.dfops, leaf_bp);
 		xfs_defer_ijoin(args.dfops, dp);
 		error = xfs_defer_finish(&args.trans, args.dfops);
 		if (error)
@@ -337,13 +345,14 @@ xfs_attr_set(
 
 		/*
 		 * Commit the leaf transformation.  We'll need another (linked)
-		 * transaction to add the new attribute to the leaf.
+		 * transaction to add the new attribute to the leaf, which
+		 * means that we have to hold & join the leaf buffer here too.
 		 */
-
 		error = xfs_trans_roll_inode(&args.trans, dp);
 		if (error)
 			goto out;
-
+		xfs_trans_bjoin(args.trans, leaf_bp);
+		leaf_bp = NULL;
 	}
 
 	if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
@@ -374,8 +383,9 @@ xfs_attr_set(
 
 out_defer_cancel:
 	xfs_defer_cancel(&dfops);
-	args.trans = NULL;
 out:
+	if (leaf_bp)
+		xfs_trans_brelse(args.trans, leaf_bp);
 	if (args.trans)
 		xfs_trans_cancel(args.trans);
 	xfs_iunlock(dp, XFS_ILOCK_EXCL);
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 53cc8b986eac45c4e5ec4afd319891b6e9f716c5..601eaa36f1ada22e2213f9178bcb5cdb5868034d 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -735,10 +735,13 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args)
 }
 
 /*
- * Convert from using the shortform to the leaf.
+ * Convert from using the shortform to the leaf.  On success, return the
+ * buffer so that we can keep it locked until we're totally done with it.
  */
 int
-xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
+xfs_attr_shortform_to_leaf(
+	struct xfs_da_args	*args,
+	struct xfs_buf		**leaf_bp)
 {
 	xfs_inode_t *dp;
 	xfs_attr_shortform_t *sf;
@@ -818,7 +821,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
 		sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
 	}
 	error = 0;
-
+	*leaf_bp = bp;
 out:
 	kmem_free(tmpbuffer);
 	return error;
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
index f7dda0c237b044b166d6d178fca3178feff2b644..894124efb421e0d0674b0f39bf63dabfe7916937 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.h
+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
@@ -48,7 +48,8 @@ void	xfs_attr_shortform_create(struct xfs_da_args *args);
 void	xfs_attr_shortform_add(struct xfs_da_args *args, int forkoff);
 int	xfs_attr_shortform_lookup(struct xfs_da_args *args);
 int	xfs_attr_shortform_getvalue(struct xfs_da_args *args);
-int	xfs_attr_shortform_to_leaf(struct xfs_da_args *args);
+int	xfs_attr_shortform_to_leaf(struct xfs_da_args *args,
+			struct xfs_buf **leaf_bp);
 int	xfs_attr_shortform_remove(struct xfs_da_args *args);
 int	xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
 int	xfs_attr_shortform_bytesfit(struct xfs_inode *dp, int bytes);
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 1210f684d3c28f9af8d8403c1f0222ef06dc380b..1bddbba6b80c960bdcc10c9a30210c119e1b2f77 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -5136,7 +5136,7 @@ __xfs_bunmapi(
 	 * blowing out the transaction with a mix of EFIs and reflink
 	 * adjustments.
 	 */
-	if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
+	if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
 		max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
 	else
 		max_len = len;
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index 072ebfe1d6aeb3e00e306a06d71a1b478382f3ad..087fea02c3892c34e1e63df2839bd8d804ba1f3f 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -249,6 +249,10 @@ xfs_defer_trans_roll(
 	for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
 		xfs_trans_log_inode(*tp, dop->dop_inodes[i], XFS_ILOG_CORE);
 
+	/* Hold the (previously bjoin'd) buffer locked across the roll. */
+	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++)
+		xfs_trans_dirty_buf(*tp, dop->dop_bufs[i]);
+
 	trace_xfs_defer_trans_roll((*tp)->t_mountp, dop);
 
 	/* Roll the transaction. */
@@ -264,6 +268,12 @@ xfs_defer_trans_roll(
 	for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
 		xfs_trans_ijoin(*tp, dop->dop_inodes[i], 0);
 
+	/* Rejoin the buffers and dirty them so the log moves forward. */
+	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++) {
+		xfs_trans_bjoin(*tp, dop->dop_bufs[i]);
+		xfs_trans_bhold(*tp, dop->dop_bufs[i]);
+	}
+
 	return error;
 }
 
@@ -295,6 +305,31 @@ xfs_defer_ijoin(
 		}
 	}
 
+	ASSERT(0);
+	return -EFSCORRUPTED;
+}
+
+/*
+ * Add this buffer to the deferred op.  Each joined buffer is relogged
+ * each time we roll the transaction.
+ */
+int
+xfs_defer_bjoin(
+	struct xfs_defer_ops		*dop,
+	struct xfs_buf			*bp)
+{
+	int				i;
+
+	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS; i++) {
+		if (dop->dop_bufs[i] == bp)
+			return 0;
+		else if (dop->dop_bufs[i] == NULL) {
+			dop->dop_bufs[i] = bp;
+			return 0;
+		}
+	}
+
+	ASSERT(0);
 	return -EFSCORRUPTED;
 }
 
@@ -493,9 +528,7 @@ xfs_defer_init(
 	struct xfs_defer_ops		*dop,
 	xfs_fsblock_t			*fbp)
 {
-	dop->dop_committed = false;
-	dop->dop_low = false;
-	memset(&dop->dop_inodes, 0, sizeof(dop->dop_inodes));
+	memset(dop, 0, sizeof(struct xfs_defer_ops));
 	*fbp = NULLFSBLOCK;
 	INIT_LIST_HEAD(&dop->dop_intake);
 	INIT_LIST_HEAD(&dop->dop_pending);
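
xfs_defer_bjoin() stores up to XFS_DEFER_OPS_NR_BUFS buffers in dop_bufs[], reusing a slot if the buffer is already joined, and xfs_defer_trans_roll() then re-dirties, rejoins and re-holds each of them across every transaction roll so a half-constructed buffer (such as the attr leaf in xfs_attr_set()) stays locked until the caller is finished with it. A small standalone model of just the slot-scan bookkeeping; the xfs_trans_* calls are kernel-only and are not represented:

#include <stdio.h>
#include <stddef.h>

#define NR_BUFS	2		/* mirrors XFS_DEFER_OPS_NR_BUFS */

struct defer_ops {
	void *bufs[NR_BUFS];	/* buffers to rejoin on every roll */
};

/* Join bp, reusing its slot if already present; -1 when all slots are full
 * (the kernel code ASSERTs and returns -EFSCORRUPTED there). */
static int defer_bjoin(struct defer_ops *dop, void *bp)
{
	int i;

	for (i = 0; i < NR_BUFS; i++) {
		if (dop->bufs[i] == bp)
			return 0;
		if (dop->bufs[i] == NULL) {
			dop->bufs[i] = bp;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	struct defer_ops dop = { { NULL, NULL } };
	int a, b, c;

	printf("join a: %d\n", defer_bjoin(&dop, &a));
	printf("join a again: %d\n", defer_bjoin(&dop, &a));	/* same slot */
	printf("join b: %d\n", defer_bjoin(&dop, &b));
	printf("join c: %d\n", defer_bjoin(&dop, &c));		/* no room */
	return 0;
}
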
diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h
index d4f046dd44bd4ae434d8104991d0327f2d2b9fc7..045beacdd37d81c9e01e0ab46ab2bbcbbb581fbb 100644
--- a/fs/xfs/libxfs/xfs_defer.h
+++ b/fs/xfs/libxfs/xfs_defer.h
@@ -59,6 +59,7 @@ enum xfs_defer_ops_type {
 };
 
 #define XFS_DEFER_OPS_NR_INODES	2	/* join up to two inodes */
+#define XFS_DEFER_OPS_NR_BUFS	2	/* join up to two buffers */
 
 struct xfs_defer_ops {
 	bool			dop_committed;	/* did any trans commit? */
@@ -66,8 +67,9 @@ struct xfs_defer_ops {
 	struct list_head	dop_intake;	/* unlogged pending work */
 	struct list_head	dop_pending;	/* logged pending work */
 
-	/* relog these inodes with each roll */
+	/* relog these with each roll */
 	struct xfs_inode	*dop_inodes[XFS_DEFER_OPS_NR_INODES];
+	struct xfs_buf		*dop_bufs[XFS_DEFER_OPS_NR_BUFS];
 };
 
 void xfs_defer_add(struct xfs_defer_ops *dop, enum xfs_defer_ops_type type,
@@ -77,6 +79,7 @@ void xfs_defer_cancel(struct xfs_defer_ops *dop);
 void xfs_defer_init(struct xfs_defer_ops *dop, xfs_fsblock_t *fbp);
 bool xfs_defer_has_unfinished_work(struct xfs_defer_ops *dop);
 int xfs_defer_ijoin(struct xfs_defer_ops *dop, struct xfs_inode *ip);
+int xfs_defer_bjoin(struct xfs_defer_ops *dop, struct xfs_buf *bp);
 
 /* Description of a deferred type. */
 struct xfs_defer_op_type {
diff --git a/fs/xfs/libxfs/xfs_iext_tree.c b/fs/xfs/libxfs/xfs_iext_tree.c
index 89bf16b4d9377293fa842c48f2b9b637f83c62c2..b0f31791c7e6137c0b7e46c35000e3c76e145ba1 100644
--- a/fs/xfs/libxfs/xfs_iext_tree.c
+++ b/fs/xfs/libxfs/xfs_iext_tree.c
@@ -632,8 +632,6 @@ xfs_iext_insert(
 	struct xfs_iext_leaf	*new = NULL;
 	int			nr_entries, i;
 
-	trace_xfs_iext_insert(ip, cur, state, _RET_IP_);
-
 	if (ifp->if_height == 0)
 		xfs_iext_alloc_root(ifp, cur);
 	else if (ifp->if_height == 1)
@@ -661,6 +659,8 @@ xfs_iext_insert(
 	xfs_iext_set(cur_rec(cur), irec);
 	ifp->if_bytes += sizeof(struct xfs_iext_rec);
 
+	trace_xfs_iext_insert(ip, cur, state, _RET_IP_);
+
 	if (new)
 		xfs_iext_insert_node(ifp, xfs_iext_leaf_key(new, 0), new, 2);
 }
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 585b35d34142157863740d3b8cd437b37e49e05d..c40d26763075307b064d49bd3cb48dfce8dd5b67 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -1488,27 +1488,12 @@ __xfs_refcount_cow_alloc(
 	xfs_extlen_t		aglen,
 	struct xfs_defer_ops	*dfops)
 {
-	int			error;
-
 	trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_private.a.agno,
 			agbno, aglen);
 
 	/* Add refcount btree reservation */
-	error = xfs_refcount_adjust_cow(rcur, agbno, aglen,
+	return xfs_refcount_adjust_cow(rcur, agbno, aglen,
 			XFS_REFCOUNT_ADJUST_COW_ALLOC, dfops);
-	if (error)
-		return error;
-
-	/* Add rmap entry */
-	if (xfs_sb_version_hasrmapbt(&rcur->bc_mp->m_sb)) {
-		error = xfs_rmap_alloc_extent(rcur->bc_mp, dfops,
-				rcur->bc_private.a.agno,
-				agbno, aglen, XFS_RMAP_OWN_COW);
-		if (error)
-			return error;
-	}
-
-	return error;
 }
 
 /*
@@ -1521,27 +1506,12 @@ __xfs_refcount_cow_free(
 	xfs_extlen_t		aglen,
 	struct xfs_defer_ops	*dfops)
 {
-	int			error;
-
 	trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_private.a.agno,
 			agbno, aglen);
 
 	/* Remove refcount btree reservation */
-	error = xfs_refcount_adjust_cow(rcur, agbno, aglen,
+	return xfs_refcount_adjust_cow(rcur, agbno, aglen,
 			XFS_REFCOUNT_ADJUST_COW_FREE, dfops);
-	if (error)
-		return error;
-
-	/* Remove rmap entry */
-	if (xfs_sb_version_hasrmapbt(&rcur->bc_mp->m_sb)) {
-		error = xfs_rmap_free_extent(rcur->bc_mp, dfops,
-				rcur->bc_private.a.agno,
-				agbno, aglen, XFS_RMAP_OWN_COW);
-		if (error)
-			return error;
-	}
-
-	return error;
 }
 
 /* Record a CoW staging extent in the refcount btree. */
@@ -1552,11 +1522,19 @@ xfs_refcount_alloc_cow_extent(
 	xfs_fsblock_t			fsb,
 	xfs_extlen_t			len)
 {
+	int				error;
+
 	if (!xfs_sb_version_hasreflink(&mp->m_sb))
 		return 0;
 
-	return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW,
+	error = __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW,
 			fsb, len);
+	if (error)
+		return error;
+
+	/* Add rmap entry */
+	return xfs_rmap_alloc_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
+			XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
 }
 
 /* Forget a CoW staging event in the refcount btree. */
@@ -1567,9 +1545,17 @@ xfs_refcount_free_cow_extent(
 	xfs_fsblock_t			fsb,
 	xfs_extlen_t			len)
 {
+	int				error;
+
 	if (!xfs_sb_version_hasreflink(&mp->m_sb))
 		return 0;
 
+	/* Remove rmap entry */
+	error = xfs_rmap_free_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
+			XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
+	if (error)
+		return error;
+
 	return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_FREE_COW,
 			fsb, len);
 }
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index dd019cee1b3bdccf4e08b25fd70b2c59220859a1..50db920ceeebbf077c2b3b13690066173ba7cf4e 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -367,6 +367,51 @@ xfs_rmap_lookup_le_range(
 	return error;
 }
 
+/*
+ * Perform all the relevant owner checks for a removal op.  If we're doing an
+ * unknown-owner removal then we have no owner information to check.
+ */
+static int
+xfs_rmap_free_check_owner(
+	struct xfs_mount	*mp,
+	uint64_t		ltoff,
+	struct xfs_rmap_irec	*rec,
+	xfs_fsblock_t		bno,
+	xfs_filblks_t		len,
+	uint64_t		owner,
+	uint64_t		offset,
+	unsigned int		flags)
+{
+	int			error = 0;
+
+	if (owner == XFS_RMAP_OWN_UNKNOWN)
+		return 0;
+
+	/* Make sure the unwritten flag matches. */
+	XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) ==
+			(rec->rm_flags & XFS_RMAP_UNWRITTEN), out);
+
+	/* Make sure the owner matches what we expect to find in the tree. */
+	XFS_WANT_CORRUPTED_GOTO(mp, owner == rec->rm_owner, out);
+
+	/* Check the offset, if necessary. */
+	if (XFS_RMAP_NON_INODE_OWNER(owner))
+		goto out;
+
+	if (flags & XFS_RMAP_BMBT_BLOCK) {
+		XFS_WANT_CORRUPTED_GOTO(mp, rec->rm_flags & XFS_RMAP_BMBT_BLOCK,
+				out);
+	} else {
+		XFS_WANT_CORRUPTED_GOTO(mp, rec->rm_offset <= offset, out);
+		XFS_WANT_CORRUPTED_GOTO(mp,
+				ltoff + rec->rm_blockcount >= offset + len,
+				out);
+	}
+
+out:
+	return error;
+}
+
 /*
  * Find the extent in the rmap btree and remove it.
  *
@@ -444,33 +489,40 @@ xfs_rmap_unmap(
 		goto out_done;
 	}
 
-	/* Make sure the unwritten flag matches. */
-	XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) ==
-			(ltrec.rm_flags & XFS_RMAP_UNWRITTEN), out_error);
+	/*
+	 * If we're doing an unknown-owner removal for EFI recovery, we expect
+	 * to find the full range in the rmapbt or nothing at all.  If we
+	 * don't find any rmaps overlapping either end of the range, we're
+	 * done.  Hopefully this means that the EFI creator already queued
+	 * (and finished) a RUI to remove the rmap.
+	 */
+	if (owner == XFS_RMAP_OWN_UNKNOWN &&
+	    ltrec.rm_startblock + ltrec.rm_blockcount <= bno) {
+		struct xfs_rmap_irec    rtrec;
+
+		error = xfs_btree_increment(cur, 0, &i);
+		if (error)
+			goto out_error;
+		if (i == 0)
+			goto out_done;
+		error = xfs_rmap_get_rec(cur, &rtrec, &i);
+		if (error)
+			goto out_error;
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+		if (rtrec.rm_startblock >= bno + len)
+			goto out_done;
+	}
 
 	/* Make sure the extent we found covers the entire freeing range. */
 	XFS_WANT_CORRUPTED_GOTO(mp, ltrec.rm_startblock <= bno &&
-		ltrec.rm_startblock + ltrec.rm_blockcount >=
-		bno + len, out_error);
+			ltrec.rm_startblock + ltrec.rm_blockcount >=
+			bno + len, out_error);
 
-	/* Make sure the owner matches what we expect to find in the tree. */
-	XFS_WANT_CORRUPTED_GOTO(mp, owner == ltrec.rm_owner ||
-				    XFS_RMAP_NON_INODE_OWNER(owner), out_error);
-
-	/* Check the offset, if necessary. */
-	if (!XFS_RMAP_NON_INODE_OWNER(owner)) {
-		if (flags & XFS_RMAP_BMBT_BLOCK) {
-			XFS_WANT_CORRUPTED_GOTO(mp,
-					ltrec.rm_flags & XFS_RMAP_BMBT_BLOCK,
-					out_error);
-		} else {
-			XFS_WANT_CORRUPTED_GOTO(mp,
-					ltrec.rm_offset <= offset, out_error);
-			XFS_WANT_CORRUPTED_GOTO(mp,
-					ltoff + ltrec.rm_blockcount >= offset + len,
-					out_error);
-		}
-	}
+	/* Check owner information. */
+	error = xfs_rmap_free_check_owner(mp, ltoff, &ltrec, bno, len, owner,
+			offset, flags);
+	if (error)
+		goto out_error;
 
 	if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
 		/* exact match, simply remove the record from rmap tree */
@@ -664,6 +716,7 @@ xfs_rmap_map(
 		flags |= XFS_RMAP_UNWRITTEN;
 	trace_xfs_rmap_map(mp, cur->bc_private.a.agno, bno, len,
 			unwritten, oinfo);
+	ASSERT(!xfs_rmap_should_skip_owner_update(oinfo));
 
 	/*
 	 * For the initial lookup, look for an exact match or the left-adjacent
diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h
index 466ede637080e5832046a96d62789eb2e46ed03f..0fcd5b1ba7295379081e0c61d230324447a8ae56 100644
--- a/fs/xfs/libxfs/xfs_rmap.h
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -61,7 +61,21 @@ static inline void
 xfs_rmap_skip_owner_update(
 	struct xfs_owner_info	*oi)
 {
-	oi->oi_owner = XFS_RMAP_OWN_UNKNOWN;
+	xfs_rmap_ag_owner(oi, XFS_RMAP_OWN_NULL);
+}
+
+static inline bool
+xfs_rmap_should_skip_owner_update(
+	struct xfs_owner_info	*oi)
+{
+	return oi->oi_owner == XFS_RMAP_OWN_NULL;
+}
+
+static inline void
+xfs_rmap_any_owner_update(
+	struct xfs_owner_info	*oi)
+{
+	xfs_rmap_ag_owner(oi, XFS_RMAP_OWN_UNKNOWN);
 }
 
 /* Reverse mapping functions. */
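
With this change the two owner sentinels mean different things: OWN_NULL (set by xfs_rmap_skip_owner_update()) tells the allocator not to touch the rmap btree at all, while OWN_UNKNOWN (set by the new xfs_rmap_any_owner_update(), used by EFI recovery above) still updates the tree but skips the owner comparison in xfs_rmap_free_check_owner(). A compact standalone model of the helpers and of that early return, with illustrative enum values:

#include <stdio.h>

enum rmap_owner {
	OWN_NULL = 1,		/* no rmap update at all */
	OWN_UNKNOWN,		/* update, but owner is not known */
	OWN_INODE_42,		/* an ordinary owner */
};

struct owner_info { enum rmap_owner owner; };

static void skip_owner_update(struct owner_info *oi) { oi->owner = OWN_NULL; }
static void any_owner_update(struct owner_info *oi)  { oi->owner = OWN_UNKNOWN; }

static int should_skip_owner_update(const struct owner_info *oi)
{
	return oi->owner == OWN_NULL;
}

/* Sketch of the removal-time check: an unknown owner skips the comparison. */
static int free_check_owner(enum rmap_owner expected, enum rmap_owner recorded)
{
	if (expected == OWN_UNKNOWN)
		return 0;			/* nothing to verify */
	return expected == recorded ? 0 : -1;	/* -1 stands in for corruption */
}

int main(void)
{
	struct owner_info oi;

	skip_owner_update(&oi);
	printf("skip rmap update: %d\n", should_skip_owner_update(&oi));
	any_owner_update(&oi);
	printf("unknown-owner check: %d\n",
	       free_check_owner(oi.owner, OWN_INODE_42));
	return 0;
}
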
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 44f8c54512102577dbe768e5f137298ba1ee79c7..64da90655e957c3fd01331720aa32093909ddad6 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -538,7 +538,7 @@ xfs_efi_recover(
 		return error;
 	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
 
-	xfs_rmap_skip_owner_update(&oinfo);
+	xfs_rmap_any_owner_update(&oinfo);
 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
 		extp = &efip->efi_format.efi_extents[i];
 		error = xfs_trans_free_extent(tp, efdp, extp->ext_start,
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 8f22fc579dbba4abf9b609040802d24e9cf2f732..60a2e128cb6a59aa7181faa5c519d511a308bc5c 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -571,6 +571,11 @@ xfs_growfs_data_private(
 		 * this doesn't actually exist in the rmap btree.
 		 */
 		xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_NULL);
+		error = xfs_rmap_free(tp, bp, agno,
+				be32_to_cpu(agf->agf_length) - new,
+				new, &oinfo);
+		if (error)
+			goto error0;
 		error = xfs_free_extent(tp,
 				XFS_AGB_TO_FSB(mp, agno,
 					be32_to_cpu(agf->agf_length) - new),
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 43005fbe8b1eefabc84ee762a9427ec784889814..3861d61fb265f66a9d39723286d5adc9772cc15e 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -870,7 +870,7 @@ xfs_eofblocks_worker(
  * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
  * (We'll just piggyback on the post-EOF prealloc space workqueue.)
  */
-STATIC void
+void
 xfs_queue_cowblocks(
 	struct xfs_mount *mp)
 {
@@ -1536,8 +1536,23 @@ xfs_inode_free_quota_eofblocks(
 	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
 }
 
+static inline unsigned long
+xfs_iflag_for_tag(
+	int		tag)
+{
+	switch (tag) {
+	case XFS_ICI_EOFBLOCKS_TAG:
+		return XFS_IEOFBLOCKS;
+	case XFS_ICI_COWBLOCKS_TAG:
+		return XFS_ICOWBLOCKS;
+	default:
+		ASSERT(0);
+		return 0;
+	}
+}
+
 static void
-__xfs_inode_set_eofblocks_tag(
+__xfs_inode_set_blocks_tag(
 	xfs_inode_t	*ip,
 	void		(*execute)(struct xfs_mount *mp),
 	void		(*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
@@ -1552,10 +1567,10 @@ __xfs_inode_set_eofblocks_tag(
 	 * Don't bother locking the AG and looking up in the radix trees
 	 * if we already know that we have the tag set.
 	 */
-	if (ip->i_flags & XFS_IEOFBLOCKS)
+	if (ip->i_flags & xfs_iflag_for_tag(tag))
 		return;
 	spin_lock(&ip->i_flags_lock);
-	ip->i_flags |= XFS_IEOFBLOCKS;
+	ip->i_flags |= xfs_iflag_for_tag(tag);
 	spin_unlock(&ip->i_flags_lock);
 
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
@@ -1587,13 +1602,13 @@ xfs_inode_set_eofblocks_tag(
 	xfs_inode_t	*ip)
 {
 	trace_xfs_inode_set_eofblocks_tag(ip);
-	return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_eofblocks,
+	return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
 			trace_xfs_perag_set_eofblocks,
 			XFS_ICI_EOFBLOCKS_TAG);
 }
 
 static void
-__xfs_inode_clear_eofblocks_tag(
+__xfs_inode_clear_blocks_tag(
 	xfs_inode_t	*ip,
 	void		(*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
 				    int error, unsigned long caller_ip),
@@ -1603,7 +1618,7 @@ __xfs_inode_clear_eofblocks_tag(
 	struct xfs_perag *pag;
 
 	spin_lock(&ip->i_flags_lock);
-	ip->i_flags &= ~XFS_IEOFBLOCKS;
+	ip->i_flags &= ~xfs_iflag_for_tag(tag);
 	spin_unlock(&ip->i_flags_lock);
 
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
@@ -1630,7 +1645,7 @@ xfs_inode_clear_eofblocks_tag(
 	xfs_inode_t	*ip)
 {
 	trace_xfs_inode_clear_eofblocks_tag(ip);
-	return __xfs_inode_clear_eofblocks_tag(ip,
+	return __xfs_inode_clear_blocks_tag(ip,
 			trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
 }
 
@@ -1724,7 +1739,7 @@ xfs_inode_set_cowblocks_tag(
 	xfs_inode_t	*ip)
 {
 	trace_xfs_inode_set_cowblocks_tag(ip);
-	return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks,
+	return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
 			trace_xfs_perag_set_cowblocks,
 			XFS_ICI_COWBLOCKS_TAG);
 }
@@ -1734,6 +1749,6 @@ xfs_inode_clear_cowblocks_tag(
 	xfs_inode_t	*ip)
 {
 	trace_xfs_inode_clear_cowblocks_tag(ip);
-	return __xfs_inode_clear_eofblocks_tag(ip,
+	return __xfs_inode_clear_blocks_tag(ip,
 			trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
 }
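
The eofblocks tagging helpers are generalised so a single set/clear path serves both the post-EOF and the CoW block tags, with xfs_iflag_for_tag() translating the radix-tree tag into the matching in-core inode flag. A standalone sketch of that mapping and of the "already tagged, nothing to do" shortcut; the bit positions are illustrative:

#include <stdio.h>

#define IEOFBLOCKS	(1u << 9)	/* illustrative flag bits */
#define ICOWBLOCKS	(1u << 12)

enum ici_tag { EOFBLOCKS_TAG, COWBLOCKS_TAG };

static unsigned int iflag_for_tag(enum ici_tag tag)
{
	switch (tag) {
	case EOFBLOCKS_TAG:
		return IEOFBLOCKS;
	case COWBLOCKS_TAG:
		return ICOWBLOCKS;
	}
	return 0;
}

struct inode_model { unsigned int i_flags; };

static void set_blocks_tag(struct inode_model *ip, enum ici_tag tag)
{
	/* Skip the more expensive per-AG work if the flag is already set. */
	if (ip->i_flags & iflag_for_tag(tag))
		return;
	ip->i_flags |= iflag_for_tag(tag);
	printf("tag %d newly set\n", tag);
}

static void clear_blocks_tag(struct inode_model *ip, enum ici_tag tag)
{
	ip->i_flags &= ~iflag_for_tag(tag);
}

int main(void)
{
	struct inode_model ip = { 0 };

	set_blocks_tag(&ip, COWBLOCKS_TAG);
	set_blocks_tag(&ip, COWBLOCKS_TAG);	/* second call is a no-op */
	clear_blocks_tag(&ip, COWBLOCKS_TAG);
	return 0;
}
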
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index bff4d85e54984ad84ca0741f801f536a99195055..d4a77588eca15b90639debc1ca6c5c62a3680de8 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -81,6 +81,7 @@ void xfs_inode_clear_cowblocks_tag(struct xfs_inode *ip);
 int xfs_icache_free_cowblocks(struct xfs_mount *, struct xfs_eofblocks *);
 int xfs_inode_free_quota_cowblocks(struct xfs_inode *ip);
 void xfs_cowblocks_worker(struct work_struct *);
+void xfs_queue_cowblocks(struct xfs_mount *);
 
 int xfs_inode_ag_iterator(struct xfs_mount *mp,
 	int (*execute)(struct xfs_inode *ip, int flags, void *args),
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index b41952a4ddd851fe63a475be4f2b6bc8d7e47beb..6f95bdb408ced01b9471b931714d279003a22d92 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1487,6 +1487,24 @@ xfs_link(
 	return error;
 }
 
+/* Clear the reflink flag and the cowblocks tag if possible. */
+static void
+xfs_itruncate_clear_reflink_flags(
+	struct xfs_inode	*ip)
+{
+	struct xfs_ifork	*dfork;
+	struct xfs_ifork	*cfork;
+
+	if (!xfs_is_reflink_inode(ip))
+		return;
+	dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+	cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
+		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
+	if (cfork->if_bytes == 0)
+		xfs_inode_clear_cowblocks_tag(ip);
+}
+
 /*
  * Free up the underlying blocks past new_size.  The new size must be smaller
  * than the current size.  This routine can be used both for the attribute and
@@ -1583,15 +1601,7 @@ xfs_itruncate_extents(
 	if (error)
 		goto out;
 
-	/*
-	 * Clear the reflink flag if there are no data fork blocks and
-	 * there are no extents staged in the cow fork.
-	 */
-	if (xfs_is_reflink_inode(ip) && ip->i_cnextents == 0) {
-		if (ip->i_d.di_nblocks == 0)
-			ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
-		xfs_inode_clear_cowblocks_tag(ip);
-	}
+	xfs_itruncate_clear_reflink_flags(ip);
 
 	/*
 	 * Always re-log the inode so that our permanent transaction can keep
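
The new xfs_itruncate_clear_reflink_flags() helper keys off the in-core fork contents: the reflink flag is dropped only when both the data fork and the CoW fork are empty, and the cowblocks tag is cleared whenever the CoW fork is empty. A standalone model of that decision, with simplified field names:

#include <stdio.h>

#define DIFLAG2_REFLINK	(1u << 0)	/* illustrative bit */

struct inode_model {
	unsigned int	di_flags2;
	long		dfork_bytes;	/* data fork extent bytes */
	long		cfork_bytes;	/* CoW fork extent bytes */
	int		cowblocks_tag;
};

static void itruncate_clear_reflink_flags(struct inode_model *ip)
{
	if (!(ip->di_flags2 & DIFLAG2_REFLINK))
		return;				/* not a reflink inode */
	if (ip->dfork_bytes == 0 && ip->cfork_bytes == 0)
		ip->di_flags2 &= ~DIFLAG2_REFLINK;
	if (ip->cfork_bytes == 0)
		ip->cowblocks_tag = 0;		/* clear the cowblocks tag */
}

int main(void)
{
	struct inode_model ip = {
		.di_flags2 = DIFLAG2_REFLINK,
		.dfork_bytes = 0, .cfork_bytes = 4096, .cowblocks_tag = 1,
	};

	itruncate_clear_reflink_flags(&ip);
	printf("reflink still set: %d, cowblocks tag: %d\n",
	       !!(ip.di_flags2 & DIFLAG2_REFLINK), ip.cowblocks_tag);
	return 0;
}
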
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index b2136af9289f3d854f88a549ec32efa455cfc78b..d383e392ec9ddcca6f552dc8c4cfe5329373d6c9 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -232,6 +232,7 @@ static inline bool xfs_is_reflink_inode(struct xfs_inode *ip)
  * log recovery to replay a bmap operation on the inode.
  */
 #define XFS_IRECOVERY		(1 << 11)
+#define XFS_ICOWBLOCKS		(1 << 12)/* has the cowblocks tag set */
 
 /*
  * Per-lifetime flags need to be reset when re-using a reclaimable inode during
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index cf7c8f81bebb566a486f0f732a0fe6a1040022c4..47aea2e82c268f4bbf9c25c1c1c6f3821c11caa3 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -454,6 +454,8 @@ retry:
 	if (error)
 		goto out_bmap_cancel;
 
+	xfs_inode_set_cowblocks_tag(ip);
+
 	/* Finish up. */
 	error = xfs_defer_finish(&tp, &dfops);
 	if (error)
@@ -490,8 +492,9 @@ xfs_reflink_find_cow_mapping(
 	struct xfs_iext_cursor		icur;
 
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));
-	ASSERT(xfs_is_reflink_inode(ip));
 
+	if (!xfs_is_reflink_inode(ip))
+		return false;
 	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
 	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got))
 		return false;
@@ -610,6 +613,9 @@ xfs_reflink_cancel_cow_blocks(
 
 			/* Remove the mapping from the CoW fork. */
 			xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
+		} else {
+			/* Didn't do anything, push cursor back. */
+			xfs_iext_prev(ifp, &icur);
 		}
 next_extent:
 		if (!xfs_iext_get_extent(ifp, &icur, &got))
@@ -725,7 +731,7 @@ xfs_reflink_end_cow(
 			(unsigned int)(end_fsb - offset_fsb),
 			XFS_DATA_FORK);
 	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
-			resblks, 0, 0, &tp);
+			resblks, 0, XFS_TRANS_RESERVE, &tp);
 	if (error)
 		goto out;
 
@@ -1291,6 +1297,17 @@ xfs_reflink_remap_range(
 
 	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
 
+	/*
+	 * Clear out post-eof preallocations because we don't have page cache
+	 * backing the delayed allocations and they'll never get freed on
+	 * their own.
+	 */
+	if (xfs_can_free_eofblocks(dest, true)) {
+		ret = xfs_free_eofblocks(dest);
+		if (ret)
+			goto out_unlock;
+	}
+
 	/* Set flags and remap blocks. */
 	ret = xfs_reflink_set_inode_flag(src, dest);
 	if (ret)
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 5122d3021117f00e20d6dd1e195c28666cc71076..1dacccc367f81725a678ea3a6ed50528a731920d 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1360,6 +1360,7 @@ xfs_fs_remount(
 			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 			return error;
 		}
+		xfs_queue_cowblocks(mp);
 
 		/* Create the per-AG metadata reservation pool .*/
 		error = xfs_fs_reserve_ag_blocks(mp);
@@ -1369,6 +1370,14 @@ xfs_fs_remount(
 
 	/* rw -> ro */
 	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & SB_RDONLY)) {
+		/* Get rid of any leftover CoW reservations... */
+		cancel_delayed_work_sync(&mp->m_cowblocks_work);
+		error = xfs_icache_free_cowblocks(mp, NULL);
+		if (error) {
+			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+			return error;
+		}
+
 		/* Free the per-AG metadata reservation pool. */
 		error = xfs_fs_unreserve_ag_blocks(mp);
 		if (error) {
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index ea189d88a3cc761e0239043785e00d9c6d8402f5..8ac4e68a12f08e4e00c1fcf850f0f8c97188d390 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -7,9 +7,10 @@
 #ifndef _ASM_GENERIC_MM_HOOKS_H
 #define _ASM_GENERIC_MM_HOOKS_H
 
-static inline void arch_dup_mmap(struct mm_struct *oldmm,
-				 struct mm_struct *mm)
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+				struct mm_struct *mm)
 {
+	return 0;
 }
 
 static inline void arch_exit_mmap(struct mm_struct *mm)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index b234d54f2cb6e4c23a21db2af3b225264eccae2a..868e68561f913ecaec80ddf05f02816767ea5a17 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1025,6 +1025,11 @@ static inline int pmd_clear_huge(pmd_t *pmd)
 struct file;
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 			unsigned long size, pgprot_t *vma_prot);
+
+#ifndef CONFIG_X86_ESPFIX64
+static inline void init_espfix_bsp(void) { }
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #ifndef io_remap_pfn_range
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
index cceafa01f9073293cf4e813247b54bbe094728e3..b67404fc4b34bab495086b4b1e969f53a0921b39 100644
--- a/include/crypto/mcryptd.h
+++ b/include/crypto/mcryptd.h
@@ -27,6 +27,7 @@ static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
 
 struct mcryptd_cpu_queue {
 	struct crypto_queue queue;
+	spinlock_t q_lock;
 	struct work_struct work;
 };
 
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 6e45608b2399813329e2280e601c2465940def53..9da6ce22803f03fc318a7fdd33af380eae67d4e6 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -62,7 +62,7 @@ struct arch_timer_cpu {
 	bool			enabled;
 };
 
-int kvm_timer_hyp_init(void);
+int kvm_timer_hyp_init(bool);
 int kvm_timer_enable(struct kvm_vcpu *vcpu);
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 82f0c8fd7be8fd20951af806319f64f615f4ffc6..23d29b39f71e83e8a6a25540adc2e3f28702aec7 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -492,6 +492,8 @@ extern unsigned int bvec_nr_vecs(unsigned short idx);
 
 #define bio_set_dev(bio, bdev) 			\
 do {						\
+	if ((bio)->bi_disk != (bdev)->bd_disk)	\
+		bio_clear_flag(bio, BIO_THROTTLED);\
 	(bio)->bi_disk = (bdev)->bd_disk;	\
 	(bio)->bi_partno = (bdev)->bd_partno;	\
 } while (0)
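
bio_set_dev() now clears BIO_THROTTLED whenever the bio is pointed at a different disk, presumably so a remapped bio is throttled again by the queue it is actually headed for instead of skipping throttling because of state set on the old device. A standalone model of the "reset per-device state on retarget" pattern, with stand-in types rather than the block-layer structs:

#include <stdio.h>

#define BIO_THROTTLED	(1u << 3)	/* illustrative flag bit */

struct disk_model { int id; };

struct bio_model {
	struct disk_model	*bi_disk;
	unsigned int		bi_flags;
};

static void bio_set_dev_model(struct bio_model *bio, struct disk_model *d)
{
	/* Per-disk state must not be carried across a retarget. */
	if (bio->bi_disk != d)
		bio->bi_flags &= ~BIO_THROTTLED;
	bio->bi_disk = d;
}

int main(void)
{
	struct disk_model a = { 1 }, b = { 2 };
	struct bio_model bio = { .bi_disk = &a, .bi_flags = BIO_THROTTLED };

	bio_set_dev_model(&bio, &a);	/* same disk: flag kept */
	printf("same disk, throttled=%d\n", !!(bio.bi_flags & BIO_THROTTLED));
	bio_set_dev_model(&bio, &b);	/* new disk: flag cleared */
	printf("new disk, throttled=%d\n", !!(bio.bi_flags & BIO_THROTTLED));
	return 0;
}
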
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index a1e628e032dad75bf1837a25e45b55a7f54ca2df..9e7d8bd776d227d2ba92b137af7230300f5b1d4a 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -50,8 +50,6 @@ struct blk_issue_stat {
 struct bio {
 	struct bio		*bi_next;	/* request queue link */
 	struct gendisk		*bi_disk;
-	u8			bi_partno;
-	blk_status_t		bi_status;
 	unsigned int		bi_opf;		/* bottom bits req flags,
 						 * top bits REQ_OP. Use
 						 * accessors.
@@ -59,8 +57,8 @@ struct bio {
 	unsigned short		bi_flags;	/* status, etc and bvec pool number */
 	unsigned short		bi_ioprio;
 	unsigned short		bi_write_hint;
-
-	struct bvec_iter	bi_iter;
+	blk_status_t		bi_status;
+	u8			bi_partno;
 
 	/* Number of segments in this BIO after
 	 * physical address coalescing is performed.
@@ -74,8 +72,9 @@ struct bio {
 	unsigned int		bi_seg_front_size;
 	unsigned int		bi_seg_back_size;
 
-	atomic_t		__bi_remaining;
+	struct bvec_iter	bi_iter;
 
+	atomic_t		__bi_remaining;
 	bio_end_io_t		*bi_end_io;
 
 	void			*bi_private;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8089ca17db9ac65998ec9cf82f65743bb5c5abb9..0ce8a372d5069a7aca7810429a968d20e923d3d1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -135,7 +135,7 @@ typedef __u32 __bitwise req_flags_t;
 struct request {
 	struct list_head queuelist;
 	union {
-		call_single_data_t csd;
+		struct __call_single_data csd;
 		u64 fifo_time;
 	};
 
@@ -241,14 +241,24 @@ struct request {
 	struct request *next_rq;
 };
 
+static inline bool blk_op_is_scsi(unsigned int op)
+{
+	return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
+}
+
+static inline bool blk_op_is_private(unsigned int op)
+{
+	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
+}
+
 static inline bool blk_rq_is_scsi(struct request *rq)
 {
-	return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
+	return blk_op_is_scsi(req_op(rq));
 }
 
 static inline bool blk_rq_is_private(struct request *rq)
 {
-	return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT;
+	return blk_op_is_private(req_op(rq));
 }
 
 static inline bool blk_rq_is_passthrough(struct request *rq)
@@ -256,6 +266,13 @@ static inline bool blk_rq_is_passthrough(struct request *rq)
 	return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
 }
 
+static inline bool bio_is_passthrough(struct bio *bio)
+{
+	unsigned op = bio_op(bio);
+
+	return blk_op_is_scsi(op) || blk_op_is_private(op);
+}
+
 static inline unsigned short req_get_ioprio(struct request *req)
 {
 	return req->ioprio;
@@ -948,7 +965,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 extern void blk_rq_unprep_clone(struct request *rq);
 extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
-extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
+extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
 extern void blk_queue_split(struct request_queue *, struct bio **);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
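
The request-level tests are split into op-level helpers so the same classification can be applied to a bare bio: bio_is_passthrough() is true for SCSI command ops and for driver-private ops, exactly like blk_rq_is_passthrough() on a request. A standalone sketch of the helper layering; the opcode values are made up, not the real REQ_OP_* constants:

#include <stdbool.h>
#include <stdio.h>

enum { OP_READ, OP_WRITE, OP_SCSI_IN, OP_SCSI_OUT, OP_DRV_IN, OP_DRV_OUT };

static bool op_is_scsi(unsigned int op)
{
	return op == OP_SCSI_IN || op == OP_SCSI_OUT;
}

static bool op_is_private(unsigned int op)
{
	return op == OP_DRV_IN || op == OP_DRV_OUT;
}

/* Both the bio- and request-level wrappers reduce to these op-level tests. */
static bool op_is_passthrough(unsigned int op)
{
	return op_is_scsi(op) || op_is_private(op);
}

int main(void)
{
	printf("OP_WRITE passthrough: %d\n", op_is_passthrough(OP_WRITE));
	printf("OP_DRV_IN passthrough: %d\n", op_is_passthrough(OP_DRV_IN));
	return 0;
}
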
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index c561b986bab0ebf886000ea34e377ea789138a3b..1632bb13ad8aed8cfeba2ccc69cfa458d02540bb 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -15,11 +15,11 @@
  * In practice this is far bigger than any realistic pointer offset; this limit
  * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
  */
-#define BPF_MAX_VAR_OFF	(1ULL << 31)
+#define BPF_MAX_VAR_OFF	(1 << 29)
 /* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO].  This ensures
  * that converting umax_value to int cannot overflow.
  */
-#define BPF_MAX_VAR_SIZ	INT_MAX
+#define BPF_MAX_VAR_SIZ	(1 << 29)
 
 /* Liveness marks, used for registers and spilled-regs (in stack slots).
  * Read marks propagate upwards until they find a write mark; they record that
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 201ab72679863163c40f664cacca77b09dfaaef5..1a32e558eb1175e812234dfaa18d5e4665b46f30 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -86,7 +86,7 @@ enum cpuhp_state {
 	CPUHP_MM_ZSWP_POOL_PREPARE,
 	CPUHP_KVM_PPC_BOOK3S_PREPARE,
 	CPUHP_ZCOMP_PREPARE,
-	CPUHP_TIMERS_DEAD,
+	CPUHP_TIMERS_PREPARE,
 	CPUHP_MIPS_SOC_PREPARE,
 	CPUHP_BP_PREPARE_DYN,
 	CPUHP_BP_PREPARE_DYN_END		= CPUHP_BP_PREPARE_DYN + 20,
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 55e672592fa93cd94aef5dfab761b688df0a7235..7258cd676df42c3ea77700d1a6ed15e7957e4f37 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -66,9 +66,10 @@ struct gpio_irq_chip {
 	/**
 	 * @lock_key:
 	 *
-	 * Per GPIO IRQ chip lockdep class.
+	 * Per GPIO IRQ chip lockdep classes.
 	 */
 	struct lock_class_key *lock_key;
+	struct lock_class_key *request_key;
 
 	/**
 	 * @parent_handler:
@@ -323,7 +324,8 @@ extern const char *gpiochip_is_requested(struct gpio_chip *chip,
 
 /* add/remove chips */
 extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
-				      struct lock_class_key *lock_key);
+				      struct lock_class_key *lock_key,
+				      struct lock_class_key *request_key);
 
 /**
  * gpiochip_add_data() - register a gpio_chip
@@ -350,11 +352,13 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
  */
 #ifdef CONFIG_LOCKDEP
 #define gpiochip_add_data(chip, data) ({		\
-		static struct lock_class_key key;	\
-		gpiochip_add_data_with_key(chip, data, &key);	\
+		static struct lock_class_key lock_key;	\
+		static struct lock_class_key request_key;	  \
+		gpiochip_add_data_with_key(chip, data, &lock_key, \
+					   &request_key);	  \
 	})
 #else
-#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL)
+#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL)
 #endif
 
 static inline int gpiochip_add(struct gpio_chip *chip)
@@ -429,7 +433,8 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
 			     irq_flow_handler_t handler,
 			     unsigned int type,
 			     bool threaded,
-			     struct lock_class_key *lock_key);
+			     struct lock_class_key *lock_key,
+			     struct lock_class_key *request_key);
 
 #ifdef CONFIG_LOCKDEP
 
@@ -445,10 +450,12 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
 				       irq_flow_handler_t handler,
 				       unsigned int type)
 {
-	static struct lock_class_key key;
+	static struct lock_class_key lock_key;
+	static struct lock_class_key request_key;
 
 	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
-					handler, type, false, &key);
+					handler, type, false,
+					&lock_key, &request_key);
 }
 
 static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
@@ -458,10 +465,12 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
 			  unsigned int type)
 {
 
-	static struct lock_class_key key;
+	static struct lock_class_key lock_key;
+	static struct lock_class_key request_key;
 
 	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
-					handler, type, true, &key);
+					handler, type, true,
+					&lock_key, &request_key);
 }
 #else
 static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
@@ -471,7 +480,7 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
 				       unsigned int type)
 {
 	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
-					handler, type, false, NULL);
+					handler, type, false, NULL, NULL);
 }
 
 static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
@@ -481,7 +490,7 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
 			  unsigned int type)
 {
 	return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
-					handler, type, true, NULL);
+					handler, type, true, NULL, NULL);
 }
 #endif /* CONFIG_LOCKDEP */
 
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index cb18c6290ca87290996e636f3eda14eb03d26316..8415bf1a9776245b810c8f92fa16c98eb038a45f 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -273,7 +273,8 @@ struct ipv6_pinfo {
 						 * 100: prefer care-of address
 						 */
 				dontfrag:1,
-				autoflowlabel:1;
+				autoflowlabel:1,
+				autoflowlabel_set:1;
 	__u8			min_hopcount;
 	__u8			tclass;
 	__be32			rcv_flowinfo;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index e140f69163b693b386bdc709719b4efc3d8a30b0..a0231e96a578348c21596a3c9d2d0cbe4a9a1249 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -212,6 +212,7 @@ struct irq_data {
  *				  mask. Applies only to affinity managed irqs.
  * IRQD_SINGLE_TARGET		- IRQ allows only a single affinity target
  * IRQD_DEFAULT_TRIGGER_SET	- Expected trigger already been set
+ * IRQD_CAN_RESERVE		- Can use reservation mode
  */
 enum {
 	IRQD_TRIGGER_MASK		= 0xf,
@@ -233,6 +234,7 @@ enum {
 	IRQD_MANAGED_SHUTDOWN		= (1 << 23),
 	IRQD_SINGLE_TARGET		= (1 << 24),
 	IRQD_DEFAULT_TRIGGER_SET	= (1 << 25),
+	IRQD_CAN_RESERVE		= (1 << 26),
 };
 
 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -377,6 +379,21 @@ static inline bool irqd_is_managed_and_shutdown(struct irq_data *d)
 	return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN;
 }
 
+static inline void irqd_set_can_reserve(struct irq_data *d)
+{
+	__irqd_to_state(d) |= IRQD_CAN_RESERVE;
+}
+
+static inline void irqd_clr_can_reserve(struct irq_data *d)
+{
+	__irqd_to_state(d) &= ~IRQD_CAN_RESERVE;
+}
+
+static inline bool irqd_can_reserve(struct irq_data *d)
+{
+	return __irqd_to_state(d) & IRQD_CAN_RESERVE;
+}
+
 #undef __irqd_to_state
 
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
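
The new IRQD_CAN_RESERVE bit follows the usual irq_data state accessor pattern: one setter, one clearer and one test helper operating on the per-interrupt state word. A standalone sketch of that trio; the state word here is a plain unsigned int rather than the private irq_common_data field behind __irqd_to_state():

#include <stdbool.h>
#include <stdio.h>

#define IRQD_CAN_RESERVE	(1u << 26)	/* bit value from the hunk above */

struct irq_data_model { unsigned int state; };

static void irqd_set_can_reserve(struct irq_data_model *d)
{
	d->state |= IRQD_CAN_RESERVE;
}

static void irqd_clr_can_reserve(struct irq_data_model *d)
{
	d->state &= ~IRQD_CAN_RESERVE;
}

static bool irqd_can_reserve(const struct irq_data_model *d)
{
	return d->state & IRQD_CAN_RESERVE;
}

int main(void)
{
	struct irq_data_model d = { 0 };

	irqd_set_can_reserve(&d);
	printf("can reserve: %d\n", irqd_can_reserve(&d));
	irqd_clr_can_reserve(&d);
	printf("can reserve: %d\n", irqd_can_reserve(&d));
	return 0;
}
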
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 39fb3700f7a92aae1a6c3d417f5effbfb93d6494..25b33b66453773cb01509725fa68664c555ffd3f 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -255,12 +255,15 @@ static inline bool irq_is_percpu_devid(unsigned int irq)
 }
 
 static inline void
-irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
+irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
+		      struct lock_class_key *request_class)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
-	if (desc)
-		lockdep_set_class(&desc->lock, class);
+	if (desc) {
+		lockdep_set_class(&desc->lock, lock_class);
+		lockdep_set_class(&desc->request_mutex, request_class);
+	}
 }
 
 #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index a34355d195463f93d3235283978633bf60f0b59d..48c7e86bb5566798aba1b1b132ab242b75b05ea9 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -113,7 +113,7 @@ struct irq_domain_ops {
 		     unsigned int nr_irqs, void *arg);
 	void (*free)(struct irq_domain *d, unsigned int virq,
 		     unsigned int nr_irqs);
-	int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool early);
+	int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve);
 	void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
 	int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
 			 unsigned long *out_hwirq, unsigned int *out_type);
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index a2a1318a3d0c8be0a1fb3d1a08fcf671ff9d8bee..c3d3f04d8cc689eddf217c0626e71d8c16530db5 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -915,10 +915,10 @@ enum PDEV_STAT  {PDEV_STAT_IDLE, PDEV_STAT_RUN};
 #define LTR_L1SS_PWR_GATE_CHECK_CARD_EN	BIT(6)
 
 enum dev_aspm_mode {
-	DEV_ASPM_DISABLE = 0,
 	DEV_ASPM_DYNAMIC,
 	DEV_ASPM_BACKDOOR,
 	DEV_ASPM_STATIC,
+	DEV_ASPM_DISABLE,
 };
 
 /*
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index a886b51511abbf146c4c76309aa313e4bbf77dda..1f509d072026d3d62a9e3701ed72bd5ab2d5b5c4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -556,6 +556,7 @@ struct mlx5_core_sriov {
 };
 
 struct mlx5_irq_info {
+	cpumask_var_t mask;
 	char name[MLX5_MAX_IRQ_NAME];
 };
 
@@ -1048,7 +1049,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 		       enum mlx5_eq_type type);
 int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 int mlx5_start_eqs(struct mlx5_core_dev *dev);
-int mlx5_stop_eqs(struct mlx5_core_dev *dev);
+void mlx5_stop_eqs(struct mlx5_core_dev *dev);
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
 		    unsigned int *irqn);
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@@ -1164,6 +1165,10 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
 int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
 struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
+				 u64 *values,
+				 int num_counters,
+				 size_t *offsets);
 struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
 void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
 
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 38a7577a9ce71fbcf63c21e2911364795842daa8..d44ec5f41d4a04c72b25b4db1d6fb0217f8f1fa1 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -147,7 +147,7 @@ enum {
 	MLX5_CMD_OP_ALLOC_Q_COUNTER               = 0x771,
 	MLX5_CMD_OP_DEALLOC_Q_COUNTER             = 0x772,
 	MLX5_CMD_OP_QUERY_Q_COUNTER               = 0x773,
-	MLX5_CMD_OP_SET_RATE_LIMIT                = 0x780,
+	MLX5_CMD_OP_SET_PP_RATE_LIMIT             = 0x780,
 	MLX5_CMD_OP_QUERY_RATE_LIMIT              = 0x781,
 	MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT      = 0x782,
 	MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT     = 0x783,
@@ -7239,7 +7239,7 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
 	u8         vxlan_udp_port[0x10];
 };
 
-struct mlx5_ifc_set_rate_limit_out_bits {
+struct mlx5_ifc_set_pp_rate_limit_out_bits {
 	u8         status[0x8];
 	u8         reserved_at_8[0x18];
 
@@ -7248,7 +7248,7 @@ struct mlx5_ifc_set_rate_limit_out_bits {
 	u8         reserved_at_40[0x40];
 };
 
-struct mlx5_ifc_set_rate_limit_in_bits {
+struct mlx5_ifc_set_pp_rate_limit_in_bits {
 	u8         opcode[0x10];
 	u8         reserved_at_10[0x10];
 
@@ -7261,6 +7261,8 @@ struct mlx5_ifc_set_rate_limit_in_bits {
 	u8         reserved_at_60[0x20];
 
 	u8         rate_limit[0x20];
+
+	u8         reserved_at_a0[0x160];
 };
 
 struct mlx5_ifc_access_register_out_bits {
diff --git a/include/linux/pti.h b/include/linux/pti.h
new file mode 100644
index 0000000000000000000000000000000000000000..0174883a935a25ddef6b33ad4765c65f0f3b25c4
--- /dev/null
+++ b/include/linux/pti.h
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _INCLUDE_PTI_H
+#define _INCLUDE_PTI_H
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#include <asm/pti.h>
+#else
+static inline void pti_init(void) { }
+#endif
+
+#endif
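
The new <linux/pti.h> follows the usual config-stub pattern: generic init code can call pti_init() unconditionally, and the call compiles away when CONFIG_PAGE_TABLE_ISOLATION is off. A minimal sketch of the same pattern, with a hypothetical CONFIG_FOO feature standing in for the real option:

/* Hypothetical feature "foo"; only the pattern mirrors the patch. */
#ifdef CONFIG_FOO
void foo_init(void);			/* real implementation elsewhere */
#else
static inline void foo_init(void) { }	/* compiles to nothing */
#endif

static void early_init_path(void)
{
	foo_init();	/* no #ifdef needed at the call site */
}
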
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 7b2170bfd6e7dae432478fffdbc70e1408740394..bc6bb325d1bf7c03db223c568b891e5d33dc93ca 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -126,7 +126,7 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
  *	for that name.  This appears in the sysfs "modalias" attribute
  *	for driver coldplugging, and in uevents used for hotplugging
  * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
- *	when not using a GPIO line)
+ *	not using a GPIO line)
  *
  * @statistics: statistics for the spi_device
  *
diff --git a/include/linux/tick.h b/include/linux/tick.h
index f442d1a42025925eb4446c6678e3a059a8cfbb39..7cc35921218ecb745634e29d0558749981b4089d 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -119,6 +119,7 @@ extern void tick_nohz_idle_exit(void);
 extern void tick_nohz_irq_exit(void);
 extern ktime_t tick_nohz_get_sleep_length(void);
 extern unsigned long tick_nohz_get_idle_calls(void);
+extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
 extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
 #else /* !CONFIG_NO_HZ_COMMON */
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 04af640ea95bd011cdec0101798b0aa7810233cb..2448f9cc48a3120222d29f8110069c626b428543 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -207,9 +207,11 @@ unsigned long round_jiffies_up(unsigned long j);
 unsigned long round_jiffies_up_relative(unsigned long j);
 
 #ifdef CONFIG_HOTPLUG_CPU
+int timers_prepare_cpu(unsigned int cpu);
 int timers_dead_cpu(unsigned int cpu);
 #else
-#define timers_dead_cpu NULL
+#define timers_prepare_cpu	NULL
+#define timers_dead_cpu		NULL
 #endif
 
 #endif
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 8b8118a7fadbc74a9aa879dc934de0442bb3a013..cb4d92b79cd932eda4e178861d8345683b329bdb 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -3226,7 +3226,6 @@ struct cfg80211_ops {
  * @WIPHY_FLAG_IBSS_RSN: The device supports IBSS RSN.
  * @WIPHY_FLAG_MESH_AUTH: The device supports mesh authentication by routing
  *	auth frames to userspace. See @NL80211_MESH_SETUP_USERSPACE_AUTH.
- * @WIPHY_FLAG_SUPPORTS_SCHED_SCAN: The device supports scheduled scans.
  * @WIPHY_FLAG_SUPPORTS_FW_ROAM: The device supports roaming feature in the
  *	firmware.
  * @WIPHY_FLAG_AP_UAPSD: The device supports uapsd on AP.
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 0105445cab83d32008b3526794c077f4bfbd9816..8e08b6da72f325bd4a623191e886fb1b746644d7 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -694,9 +694,7 @@ struct tc_cls_matchall_offload {
 };
 
 enum tc_clsbpf_command {
-	TC_CLSBPF_ADD,
-	TC_CLSBPF_REPLACE,
-	TC_CLSBPF_DESTROY,
+	TC_CLSBPF_OFFLOAD,
 	TC_CLSBPF_STATS,
 };
 
@@ -705,6 +703,7 @@ struct tc_cls_bpf_offload {
 	enum tc_clsbpf_command command;
 	struct tcf_exts *exts;
 	struct bpf_prog *prog;
+	struct bpf_prog *oldprog;
 	const char *name;
 	bool exts_integrated;
 	u32 gen_flags;
diff --git a/include/net/sock.h b/include/net/sock.h
index 9155da42269208b358df8535b14dfd3dba509365..7a7b14e9628a1174e18dda30cd0b5bfd5b32d30d 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1514,6 +1514,11 @@ static inline bool sock_owned_by_user(const struct sock *sk)
 	return sk->sk_lock.owned;
 }
 
+static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
+{
+	return sk->sk_lock.owned;
+}
+
 /* no reclassification while locks are held */
 static inline bool sock_allow_reclassification(const struct sock *csk)
 {
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index dc28a98ce97ca7d53d67809a0fc718f0b9d622f8..ae35991b5877029b217a2b49fba7f012f193148c 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1570,6 +1570,9 @@ int xfrm_init_state(struct xfrm_state *x);
 int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
 int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
 int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
+int xfrm_trans_queue(struct sk_buff *skb,
+		     int (*finish)(struct net *, struct sock *,
+				   struct sk_buff *));
 int xfrm_output_resume(struct sk_buff *skb, int err);
 int xfrm_output(struct sock *sk, struct sk_buff *skb);
 int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index ca00130cb0281d6fb9cc472ac0d163490f0a1cb1..9c14e21dda85cf94ee47e3faa9bef10a881dc650 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -193,7 +193,7 @@ struct hda_dai_map {
  * @pvt_data - private data, for asoc contains asoc codec object
  */
 struct hdac_ext_device {
-	struct hdac_device hdac;
+	struct hdac_device hdev;
 	struct hdac_ext_bus *ebus;
 
 	/* soc-dai to nid map */
@@ -213,7 +213,7 @@ struct hdac_ext_dma_params {
 	u8 stream_tag;
 };
 #define to_ehdac_device(dev) (container_of((dev), \
-				 struct hdac_ext_device, hdac))
+				 struct hdac_ext_device, hdev))
 /*
  * HD-audio codec base driver
  */
diff --git a/include/sound/rt5514.h b/include/sound/rt5514.h
index ef18494769ee1d9be57a2e973af3417e5df57ce4..64d027dbaaca9637cb230e3de22c153bbb4b26b6 100644
--- a/include/sound/rt5514.h
+++ b/include/sound/rt5514.h
@@ -14,6 +14,8 @@
 
 struct rt5514_platform_data {
 	unsigned int dmic_init_delay;
+	const char *dsp_calib_clk_name;
+	unsigned int dsp_calib_clk_rate;
 };
 
 #endif
diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h
index d0c33a9972b917c2f1b4e91aebfc35d9540102f8..f218c742f08ebddda4a900010001b3844e13182d 100644
--- a/include/sound/rt5645.h
+++ b/include/sound/rt5645.h
@@ -25,6 +25,9 @@ struct rt5645_platform_data {
 	bool level_trigger_irq;
 	/* Invert JD1_1 status polarity */
 	bool inv_jd1_1;
+
+	/* Value to assign to snd_soc_card.long_name */
+	const char *long_name;
 };
 
 #endif
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index d970879944fc32e94175a8dfc8652f6c14cda889..8ad11669e4d8f346ae3e654587386af88d08281e 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -296,9 +296,6 @@ struct snd_soc_dai {
 	/* DAI runtime info */
 	unsigned int capture_active:1;		/* stream is in use */
 	unsigned int playback_active:1;		/* stream is in use */
-	unsigned int symmetric_rates:1;
-	unsigned int symmetric_channels:1;
-	unsigned int symmetric_samplebits:1;
 	unsigned int probed:1;
 
 	unsigned int active;
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 1a7323238c495d7d51313f6ccd8780291ff3e9e5..a34aa200e113d1a106b256cd2091466bba7e66a3 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -494,6 +494,8 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num);
 int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num);
 #endif
 
+void snd_soc_disconnect_sync(struct device *dev);
+
 struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
 		const char *dai_link, int stream);
 struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
@@ -858,12 +860,10 @@ struct snd_soc_component {
 	struct list_head card_aux_list; /* for auxiliary bound components */
 	struct list_head card_list;
 
-	struct snd_soc_dai_driver *dai_drv;
-	int num_dai;
-
 	const struct snd_soc_component_driver *driver;
 
 	struct list_head dai_list;
+	int num_dai;
 
 	int (*read)(struct snd_soc_component *, unsigned int, unsigned int *);
 	int (*write)(struct snd_soc_component *, unsigned int, unsigned int);
diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
index 758607226bfdd5269d33557b650a31628a59d22f..2cd449328aee37e55de94633d803d73fe3057f9e 100644
--- a/include/trace/events/clk.h
+++ b/include/trace/events/clk.h
@@ -134,12 +134,12 @@ DECLARE_EVENT_CLASS(clk_parent,
 
 	TP_STRUCT__entry(
 		__string(        name,           core->name                )
-		__string(        pname,          parent->name              )
+		__string(        pname, parent ? parent->name : "none"     )
 	),
 
 	TP_fast_assign(
 		__assign_str(name, core->name);
-		__assign_str(pname, parent->name);
+		__assign_str(pname, parent ? parent->name : "none");
 	),
 
 	TP_printk("%s %s", __get_str(name), __get_str(pname))
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index e4b0b8e099325f2801e4f3af168004c603c23794..2c735a3e66133fc08740b4df6d64919c491c9d1e 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -211,7 +211,7 @@ TRACE_EVENT(kvm_ack_irq,
 	{ KVM_TRACE_MMIO_WRITE, "write" }
 
 TRACE_EVENT(kvm_mmio,
-	TP_PROTO(int type, int len, u64 gpa, u64 val),
+	TP_PROTO(int type, int len, u64 gpa, void *val),
 	TP_ARGS(type, len, gpa, val),
 
 	TP_STRUCT__entry(
@@ -225,7 +225,10 @@ TRACE_EVENT(kvm_mmio,
 		__entry->type		= type;
 		__entry->len		= len;
 		__entry->gpa		= gpa;
-		__entry->val		= val;
+		__entry->val		= 0;
+		if (val)
+			memcpy(&__entry->val, val,
+			       min_t(u32, sizeof(__entry->val), len));
 	),
 
 	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
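
Passing the MMIO data as a pointer lets the tracepoint copy only the bytes that are actually present, instead of reinterpreting a value (or pointer) as a u64. A standalone userspace sketch of the bounded copy, not the tracepoint itself:

#include <stdint.h>
#include <string.h>

static uint64_t capture_mmio_val(const void *val, unsigned int len)
{
	uint64_t out = 0;
	size_t n = len < sizeof(out) ? len : sizeof(out);

	if (val)
		memcpy(&out, val, n);	/* never reads past a short buffer */
	return out;
}
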
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 07cccca6cbf1762684152146372e35e1cd758338..ab34c561f26bec42a8ff32cb6f7e911447f03af2 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -25,6 +25,35 @@
 		tcp_state_name(TCP_CLOSING),		\
 		tcp_state_name(TCP_NEW_SYN_RECV))
 
+#define TP_STORE_V4MAPPED(__entry, saddr, daddr)		\
+	do {							\
+		struct in6_addr *pin6;				\
+								\
+		pin6 = (struct in6_addr *)__entry->saddr_v6;	\
+		ipv6_addr_set_v4mapped(saddr, pin6);		\
+		pin6 = (struct in6_addr *)__entry->daddr_v6;	\
+		ipv6_addr_set_v4mapped(daddr, pin6);		\
+	} while (0)
+
+#if IS_ENABLED(CONFIG_IPV6)
+#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6)		\
+	do {								\
+		if (sk->sk_family == AF_INET6) {			\
+			struct in6_addr *pin6;				\
+									\
+			pin6 = (struct in6_addr *)__entry->saddr_v6;	\
+			*pin6 = saddr6;					\
+			pin6 = (struct in6_addr *)__entry->daddr_v6;	\
+			*pin6 = daddr6;					\
+		} else {						\
+			TP_STORE_V4MAPPED(__entry, saddr, daddr);	\
+		}							\
+	} while (0)
+#else
+#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6)	\
+	TP_STORE_V4MAPPED(__entry, saddr, daddr)
+#endif
+
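
TP_STORE_ADDRS consolidates the open-coded address-copy blocks removed from the event classes below; in the IPv4 case the addresses are stored in the v4-mapped IPv6 form (::ffff:a.b.c.d) that ipv6_addr_set_v4mapped() produces. A standalone userspace sketch of that layout (not part of the patch):

#include <stdint.h>
#include <string.h>

static void set_v4mapped(uint32_t be_ipv4, uint8_t v6[16])
{
	memset(v6, 0, 10);		/* bytes 0..9   = 0      */
	v6[10] = 0xff;			/* bytes 10..11 = 0xffff */
	v6[11] = 0xff;
	memcpy(&v6[12], &be_ipv4, 4);	/* bytes 12..15 = IPv4   */
}
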
 /*
  * tcp event with arguments sk and skb
  *
@@ -50,7 +79,6 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
 
 	TP_fast_assign(
 		struct inet_sock *inet = inet_sk(sk);
-		struct in6_addr *pin6;
 		__be32 *p32;
 
 		__entry->skbaddr = skb;
@@ -65,20 +93,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb,
 		p32 = (__be32 *) __entry->daddr;
 		*p32 =  inet->inet_daddr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-		if (sk->sk_family == AF_INET6) {
-			pin6 = (struct in6_addr *)__entry->saddr_v6;
-			*pin6 = sk->sk_v6_rcv_saddr;
-			pin6 = (struct in6_addr *)__entry->daddr_v6;
-			*pin6 = sk->sk_v6_daddr;
-		} else
-#endif
-		{
-			pin6 = (struct in6_addr *)__entry->saddr_v6;
-			ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
-			pin6 = (struct in6_addr *)__entry->daddr_v6;
-			ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
-		}
+		TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+			      sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
 	),
 
 	TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
@@ -127,7 +143,6 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
 
 	TP_fast_assign(
 		struct inet_sock *inet = inet_sk(sk);
-		struct in6_addr *pin6;
 		__be32 *p32;
 
 		__entry->skaddr = sk;
@@ -141,20 +156,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk,
 		p32 = (__be32 *) __entry->daddr;
 		*p32 =  inet->inet_daddr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-		if (sk->sk_family == AF_INET6) {
-			pin6 = (struct in6_addr *)__entry->saddr_v6;
-			*pin6 = sk->sk_v6_rcv_saddr;
-			pin6 = (struct in6_addr *)__entry->daddr_v6;
-			*pin6 = sk->sk_v6_daddr;
-		} else
-#endif
-		{
-			pin6 = (struct in6_addr *)__entry->saddr_v6;
-			ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
-			pin6 = (struct in6_addr *)__entry->daddr_v6;
-			ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
-		}
+		TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+			       sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
 	),
 
 	TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
@@ -197,7 +200,6 @@ TRACE_EVENT(tcp_set_state,
 
 	TP_fast_assign(
 		struct inet_sock *inet = inet_sk(sk);
-		struct in6_addr *pin6;
 		__be32 *p32;
 
 		__entry->skaddr = sk;
@@ -213,20 +215,8 @@ TRACE_EVENT(tcp_set_state,
 		p32 = (__be32 *) __entry->daddr;
 		*p32 =  inet->inet_daddr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-		if (sk->sk_family == AF_INET6) {
-			pin6 = (struct in6_addr *)__entry->saddr_v6;
-			*pin6 = sk->sk_v6_rcv_saddr;
-			pin6 = (struct in6_addr *)__entry->daddr_v6;
-			*pin6 = sk->sk_v6_daddr;
-		} else
-#endif
-		{
-			pin6 = (struct in6_addr *)__entry->saddr_v6;
-			ipv6_addr_set_v4mapped(inet->inet_saddr, pin6);
-			pin6 = (struct in6_addr *)__entry->daddr_v6;
-			ipv6_addr_set_v4mapped(inet->inet_daddr, pin6);
-		}
+		TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr,
+			       sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
 	),
 
 	TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s",
@@ -256,7 +246,6 @@ TRACE_EVENT(tcp_retransmit_synack,
 
 	TP_fast_assign(
 		struct inet_request_sock *ireq = inet_rsk(req);
-		struct in6_addr *pin6;
 		__be32 *p32;
 
 		__entry->skaddr = sk;
@@ -271,20 +260,8 @@ TRACE_EVENT(tcp_retransmit_synack,
 		p32 = (__be32 *) __entry->daddr;
 		*p32 = ireq->ir_rmt_addr;
 
-#if IS_ENABLED(CONFIG_IPV6)
-		if (sk->sk_family == AF_INET6) {
-			pin6 = (struct in6_addr *)__entry->saddr_v6;
-			*pin6 = ireq->ir_v6_loc_addr;
-			pin6 = (struct in6_addr *)__entry->daddr_v6;
-			*pin6 = ireq->ir_v6_rmt_addr;
-		} else
-#endif
-		{
-			pin6 = (struct in6_addr *)__entry->saddr_v6;
-			ipv6_addr_set_v4mapped(ireq->ir_loc_addr, pin6);
-			pin6 = (struct in6_addr *)__entry->daddr_v6;
-			ipv6_addr_set_v4mapped(ireq->ir_rmt_addr, pin6);
-		}
+		TP_STORE_ADDRS(__entry, ireq->ir_loc_addr, ireq->ir_rmt_addr,
+			      ireq->ir_v6_loc_addr, ireq->ir_v6_rmt_addr);
 	),
 
 	TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c",
diff --git a/include/uapi/sound/snd_sst_tokens.h b/include/uapi/sound/snd_sst_tokens.h
index 326054a72bc7e9ca240a39128be9585531c025cc..8ba0112e533688a26cb3012be334fbc35c9c4e67 100644
--- a/include/uapi/sound/snd_sst_tokens.h
+++ b/include/uapi/sound/snd_sst_tokens.h
@@ -222,6 +222,17 @@
  * %SKL_TKN_MM_U32_NUM_IN_FMT:  Number of input formats
  * %SKL_TKN_MM_U32_NUM_OUT_FMT: Number of output formats
  *
+ * %SKL_TKN_U32_ASTATE_IDX:     Table Index for the A-State entry to be filled
+ *                              with kcps and clock source
+ *
+ * %SKL_TKN_U32_ASTATE_COUNT:   Number of valid entries in A-State table
+ *
+ * %SKL_TKN_U32_ASTATE_KCPS:    Specifies the core load threshold (in kilo
+ *                              cycles per second) below which DSP is clocked
+ *                              from source specified by clock source.
+ *
+ * %SKL_TKN_U32_ASTATE_CLK_SRC: Clock source for A-State entry
+ *
  * module_id and loadable flags dont have tokens as these values will be
  * read from the DSP FW manifest
  *
@@ -309,7 +320,11 @@ enum SKL_TKNS {
 	SKL_TKN_MM_U32_NUM_IN_FMT,
 	SKL_TKN_MM_U32_NUM_OUT_FMT,
 
-	SKL_TKN_MAX = SKL_TKN_MM_U32_NUM_OUT_FMT,
+	SKL_TKN_U32_ASTATE_IDX,
+	SKL_TKN_U32_ASTATE_COUNT,
+	SKL_TKN_U32_ASTATE_KCPS,
+	SKL_TKN_U32_ASTATE_CLK_SRC,
+	SKL_TKN_MAX = SKL_TKN_U32_ASTATE_CLK_SRC,
 };
 
 #endif
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 4914b93a23f2bdeb066a4048d7ab749456ae3fe3..61f410fd74e4cf4180f7ad5ffa1d996cc1528c91 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -44,3 +44,8 @@ static inline void xen_balloon_init(void)
 {
 }
 #endif
+
+#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+struct resource;
+void arch_xen_balloon_init(struct resource *hostmem_resource);
+#endif
diff --git a/init/Kconfig b/init/Kconfig
index 2934249fba46746253b89d14f581547ada3d0bad..690a381adee0d164f52c9984f61c0d710c54c089 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -461,10 +461,14 @@ endmenu # "CPU/Task time and stats accounting"
 
 config CPU_ISOLATION
 	bool "CPU isolation"
+	default y
 	help
 	  Make sure that CPUs running critical tasks are not disturbed by
 	  any source of "noise" such as unbound workqueues, timers, kthreads...
-	  Unbound jobs get offloaded to housekeeping CPUs.
+	  Unbound jobs get offloaded to housekeeping CPUs. This is driven by
+	  the "isolcpus=" boot parameter.
+
+	  Say Y if unsure.
 
 source "kernel/rcu/Kconfig"
 
diff --git a/init/main.c b/init/main.c
index e96e3a14533cda199963fe96b97dc78779c66037..a8100b9548398e8b102052f2c1418b21ea423825 100644
--- a/init/main.c
+++ b/init/main.c
@@ -75,6 +75,7 @@
 #include <linux/slab.h>
 #include <linux/perf_event.h>
 #include <linux/ptrace.h>
+#include <linux/pti.h>
 #include <linux/blkdev.h>
 #include <linux/elevator.h>
 #include <linux/sched_clock.h>
@@ -504,6 +505,10 @@ static void __init mm_init(void)
 	pgtable_init();
 	vmalloc_init();
 	ioremap_huge_init();
+	/* Should be run before the first non-init thread is created */
+	init_espfix_bsp();
+	/* Should be run after espfix64 is set up. */
+	pti_init();
 }
 
 asmlinkage __visible void __init start_kernel(void)
@@ -678,10 +683,6 @@ asmlinkage __visible void __init start_kernel(void)
 #ifdef CONFIG_X86
 	if (efi_enabled(EFI_RUNTIME_SERVICES))
 		efi_enter_virtual_mode();
-#endif
-#ifdef CONFIG_X86_ESPFIX64
-	/* Should be run before the first non-init thread is created */
-	init_espfix_bsp();
 #endif
 	thread_stack_cache_init();
 	cred_init();
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d4593571c4049b8d046f53f81f8e17911a21e0c9..04b24876cd23c83c9502afc60853c871ee3fee13 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1059,6 +1059,11 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
 		break;
 	case PTR_TO_STACK:
 		pointer_desc = "stack ";
+		/* The stack spill tracking logic in check_stack_write()
+		 * and check_stack_read() relies on stack accesses being
+		 * aligned.
+		 */
+		strict = true;
 		break;
 	default:
 		break;
@@ -1067,6 +1072,29 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
 					   strict);
 }
 
+/* truncate register to smaller size (in bytes)
+ * must be called with size < BPF_REG_SIZE
+ */
+static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
+{
+	u64 mask;
+
+	/* clear high bits in bit representation */
+	reg->var_off = tnum_cast(reg->var_off, size);
+
+	/* fix arithmetic bounds */
+	mask = ((u64)1 << (size * 8)) - 1;
+	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
+		reg->umin_value &= mask;
+		reg->umax_value &= mask;
+	} else {
+		reg->umin_value = 0;
+		reg->umax_value = mask;
+	}
+	reg->smin_value = reg->umin_value;
+	reg->smax_value = reg->umax_value;
+}
+
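
coerce_reg_to_size() replaces the old coerce_reg_to_32() and keeps the arithmetic bounds consistent with the truncated value: if umin and umax agree on the bits above the truncation width, the masked bounds stay tight; otherwise the truncated value can land anywhere in [0, mask]. A userspace sketch of that bounds logic with example values (assuming, as the comment above states, size is in bytes and less than 8):

#include <stdint.h>
#include <stdio.h>

static void coerce_bounds(uint64_t *umin, uint64_t *umax, int size)
{
	uint64_t mask = ((uint64_t)1 << (size * 8)) - 1;

	if ((*umin & ~mask) == (*umax & ~mask)) {
		*umin &= mask;		/* high bits agree: bounds stay tight */
		*umax &= mask;
	} else {
		*umin = 0;		/* truncation can wrap: widen to full range */
		*umax = mask;
	}
}

int main(void)
{
	uint64_t lo = 0x100000005ULL, hi = 0x100000009ULL;

	coerce_bounds(&lo, &hi, 4);
	printf("tight:   [%llu, %llu]\n",
	       (unsigned long long)lo, (unsigned long long)hi);	/* [5, 9] */

	lo = 0x0ffffffffULL;
	hi = 0x100000001ULL;
	coerce_bounds(&lo, &hi, 4);
	printf("widened: [0x%llx, 0x%llx]\n",
	       (unsigned long long)lo, (unsigned long long)hi);	/* [0x0, 0xffffffff] */
	return 0;
}
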
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
@@ -1200,9 +1228,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
 	    regs[value_regno].type == SCALAR_VALUE) {
 		/* b/h/w load zero-extends, mark upper bits as known 0 */
-		regs[value_regno].var_off =
-			tnum_cast(regs[value_regno].var_off, size);
-		__update_reg_bounds(&regs[value_regno]);
+		coerce_reg_to_size(&regs[value_regno], size);
 	}
 	return err;
 }
@@ -1282,6 +1308,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 		tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
 		verbose(env, "invalid variable stack read R%d var_off=%s\n",
 			regno, tn_buf);
+		return -EACCES;
 	}
 	off = regs[regno].off + regs[regno].var_off.value;
 	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
@@ -1674,7 +1701,13 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 		return -EINVAL;
 	}
 
+	/* With LD_ABS/IND some JITs save/restore skb from r1. */
 	changes_data = bpf_helper_changes_pkt_data(fn->func);
+	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
+		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
+			func_id_name(func_id), func_id);
+		return -EINVAL;
+	}
 
 	memset(&meta, 0, sizeof(meta));
 	meta.pkt_access = fn->pkt_access;
@@ -1766,14 +1799,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 	return 0;
 }
 
-static void coerce_reg_to_32(struct bpf_reg_state *reg)
-{
-	/* clear high 32 bits */
-	reg->var_off = tnum_cast(reg->var_off, 4);
-	/* Update bounds */
-	__update_reg_bounds(reg);
-}
-
 static bool signed_add_overflows(s64 a, s64 b)
 {
 	/* Do the add in u64, where overflow is well-defined */
@@ -1794,6 +1819,41 @@ static bool signed_sub_overflows(s64 a, s64 b)
 	return res > a;
 }
 
+static bool check_reg_sane_offset(struct bpf_verifier_env *env,
+				  const struct bpf_reg_state *reg,
+				  enum bpf_reg_type type)
+{
+	bool known = tnum_is_const(reg->var_off);
+	s64 val = reg->var_off.value;
+	s64 smin = reg->smin_value;
+
+	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
+		verbose(env, "math between %s pointer and %lld is not allowed\n",
+			reg_type_str[type], val);
+		return false;
+	}
+
+	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
+		verbose(env, "%s pointer offset %d is not allowed\n",
+			reg_type_str[type], reg->off);
+		return false;
+	}
+
+	if (smin == S64_MIN) {
+		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
+			reg_type_str[type]);
+		return false;
+	}
+
+	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
+		verbose(env, "value %lld makes %s pointer be out of bounds\n",
+			smin, reg_type_str[type]);
+		return false;
+	}
+
+	return true;
+}
+
 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
  * Caller should also handle BPF_MOV case separately.
  * If we return -EACCES, caller may want to try again treating pointer as a
@@ -1830,29 +1890,25 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 
 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
-		if (!env->allow_ptr_leaks)
-			verbose(env,
-				"R%d 32-bit pointer arithmetic prohibited\n",
-				dst);
+		verbose(env,
+			"R%d 32-bit pointer arithmetic prohibited\n",
+			dst);
 		return -EACCES;
 	}
 
 	if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
-		if (!env->allow_ptr_leaks)
-			verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
-				dst);
+		verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
+			dst);
 		return -EACCES;
 	}
 	if (ptr_reg->type == CONST_PTR_TO_MAP) {
-		if (!env->allow_ptr_leaks)
-			verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
-				dst);
+		verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
+			dst);
 		return -EACCES;
 	}
 	if (ptr_reg->type == PTR_TO_PACKET_END) {
-		if (!env->allow_ptr_leaks)
-			verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
-				dst);
+		verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
+			dst);
 		return -EACCES;
 	}
 
@@ -1862,6 +1918,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	dst_reg->type = ptr_reg->type;
 	dst_reg->id = ptr_reg->id;
 
+	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
+	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
+		return -EINVAL;
+
 	switch (opcode) {
 	case BPF_ADD:
 		/* We can take a fixed offset as long as it doesn't overflow
@@ -1915,9 +1975,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	case BPF_SUB:
 		if (dst_reg == off_reg) {
 			/* scalar -= pointer.  Creates an unknown scalar */
-			if (!env->allow_ptr_leaks)
-				verbose(env, "R%d tried to subtract pointer from scalar\n",
-					dst);
+			verbose(env, "R%d tried to subtract pointer from scalar\n",
+				dst);
 			return -EACCES;
 		}
 		/* We don't allow subtraction from FP, because (according to
@@ -1925,9 +1984,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		 * be able to deal with it.
 		 */
 		if (ptr_reg->type == PTR_TO_STACK) {
-			if (!env->allow_ptr_leaks)
-				verbose(env, "R%d subtraction from stack pointer prohibited\n",
-					dst);
+			verbose(env, "R%d subtraction from stack pointer prohibited\n",
+				dst);
 			return -EACCES;
 		}
 		if (known && (ptr_reg->off - smin_val ==
@@ -1976,28 +2034,30 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	case BPF_AND:
 	case BPF_OR:
 	case BPF_XOR:
-		/* bitwise ops on pointers are troublesome, prohibit for now.
-		 * (However, in principle we could allow some cases, e.g.
-		 * ptr &= ~3 which would reduce min_value by 3.)
-		 */
-		if (!env->allow_ptr_leaks)
-			verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
-				dst, bpf_alu_string[opcode >> 4]);
+		/* bitwise ops on pointers are troublesome, prohibit. */
+		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
+			dst, bpf_alu_string[opcode >> 4]);
 		return -EACCES;
 	default:
 		/* other operators (e.g. MUL,LSH) produce non-pointer results */
-		if (!env->allow_ptr_leaks)
-			verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
-				dst, bpf_alu_string[opcode >> 4]);
+		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
+			dst, bpf_alu_string[opcode >> 4]);
 		return -EACCES;
 	}
 
+	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
+		return -EINVAL;
+
 	__update_reg_bounds(dst_reg);
 	__reg_deduce_bounds(dst_reg);
 	__reg_bound_offset(dst_reg);
 	return 0;
 }
 
+/* WARNING: This function does calculations on 64-bit values, but the actual
+ * execution may occur on 32-bit values. Therefore, things like bitshifts
+ * need extra checks in the 32-bit case.
+ */
 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 				      struct bpf_insn *insn,
 				      struct bpf_reg_state *dst_reg,
@@ -2008,12 +2068,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	bool src_known, dst_known;
 	s64 smin_val, smax_val;
 	u64 umin_val, umax_val;
+	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
 
-	if (BPF_CLASS(insn->code) != BPF_ALU64) {
-		/* 32-bit ALU ops are (32,32)->64 */
-		coerce_reg_to_32(dst_reg);
-		coerce_reg_to_32(&src_reg);
-	}
 	smin_val = src_reg.smin_value;
 	smax_val = src_reg.smax_value;
 	umin_val = src_reg.umin_value;
@@ -2021,6 +2077,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	src_known = tnum_is_const(src_reg.var_off);
 	dst_known = tnum_is_const(dst_reg->var_off);
 
+	if (!src_known &&
+	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
+		__mark_reg_unknown(dst_reg);
+		return 0;
+	}
+
 	switch (opcode) {
 	case BPF_ADD:
 		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
@@ -2149,9 +2211,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		__update_reg_bounds(dst_reg);
 		break;
 	case BPF_LSH:
-		if (umax_val > 63) {
-			/* Shifts greater than 63 are undefined.  This includes
-			 * shifts by a negative number.
+		if (umax_val >= insn_bitness) {
+			/* Shifts greater than 31 or 63 are undefined.
+			 * This includes shifts by a negative number.
 			 */
 			mark_reg_unknown(env, regs, insn->dst_reg);
 			break;
@@ -2177,27 +2239,29 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		__update_reg_bounds(dst_reg);
 		break;
 	case BPF_RSH:
-		if (umax_val > 63) {
-			/* Shifts greater than 63 are undefined.  This includes
-			 * shifts by a negative number.
+		if (umax_val >= insn_bitness) {
+			/* Shifts greater than 31 or 63 are undefined.
+			 * This includes shifts by a negative number.
 			 */
 			mark_reg_unknown(env, regs, insn->dst_reg);
 			break;
 		}
-		/* BPF_RSH is an unsigned shift, so make the appropriate casts */
-		if (dst_reg->smin_value < 0) {
-			if (umin_val) {
-				/* Sign bit will be cleared */
-				dst_reg->smin_value = 0;
-			} else {
-				/* Lost sign bit information */
-				dst_reg->smin_value = S64_MIN;
-				dst_reg->smax_value = S64_MAX;
-			}
-		} else {
-			dst_reg->smin_value =
-				(u64)(dst_reg->smin_value) >> umax_val;
-		}
+		/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
+		 * be negative, then either:
+		 * 1) src_reg might be zero, so the sign bit of the result is
+		 *    unknown, so we lose our signed bounds
+		 * 2) it's known negative, thus the unsigned bounds capture the
+		 *    signed bounds
+		 * 3) the signed bounds cross zero, so they tell us nothing
+		 *    about the result
+		 * If the value in dst_reg is known nonnegative, then again the
+		 * unsigned bounds capture the signed bounds.
+		 * Thus, in all cases it suffices to blow away our signed bounds
+		 * and rely on inferring new ones from the unsigned bounds and
+		 * var_off of the result.
+		 */
+		dst_reg->smin_value = S64_MIN;
+		dst_reg->smax_value = S64_MAX;
 		if (src_known)
 			dst_reg->var_off = tnum_rshift(dst_reg->var_off,
 						       umin_val);
@@ -2213,6 +2277,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		break;
 	}
 
+	if (BPF_CLASS(insn->code) != BPF_ALU64) {
+		/* 32-bit ALU ops are (32,32)->32 */
+		coerce_reg_to_size(dst_reg, 4);
+		coerce_reg_to_size(&src_reg, 4);
+	}
+
 	__reg_deduce_bounds(dst_reg);
 	__reg_bound_offset(dst_reg);
 	return 0;
@@ -2227,7 +2297,6 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 	struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
 	u8 opcode = BPF_OP(insn->code);
-	int rc;
 
 	dst_reg = &regs[insn->dst_reg];
 	src_reg = NULL;
@@ -2238,43 +2307,29 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 		if (src_reg->type != SCALAR_VALUE) {
 			if (dst_reg->type != SCALAR_VALUE) {
 				/* Combining two pointers by any ALU op yields
-				 * an arbitrary scalar.
+				 * an arbitrary scalar. Disallow all math except
+				 * pointer subtraction
 				 */
-				if (!env->allow_ptr_leaks) {
-					verbose(env, "R%d pointer %s pointer prohibited\n",
-						insn->dst_reg,
-						bpf_alu_string[opcode >> 4]);
-					return -EACCES;
+				if (opcode == BPF_SUB) {
+					mark_reg_unknown(env, regs, insn->dst_reg);
+					return 0;
 				}
-				mark_reg_unknown(env, regs, insn->dst_reg);
-				return 0;
+				verbose(env, "R%d pointer %s pointer prohibited\n",
+					insn->dst_reg,
+					bpf_alu_string[opcode >> 4]);
+				return -EACCES;
 			} else {
 				/* scalar += pointer
 				 * This is legal, but we have to reverse our
 				 * src/dest handling in computing the range
 				 */
-				rc = adjust_ptr_min_max_vals(env, insn,
-							     src_reg, dst_reg);
-				if (rc == -EACCES && env->allow_ptr_leaks) {
-					/* scalar += unknown scalar */
-					__mark_reg_unknown(&off_reg);
-					return adjust_scalar_min_max_vals(
-							env, insn,
-							dst_reg, off_reg);
-				}
-				return rc;
+				return adjust_ptr_min_max_vals(env, insn,
+							       src_reg, dst_reg);
 			}
 		} else if (ptr_reg) {
 			/* pointer += scalar */
-			rc = adjust_ptr_min_max_vals(env, insn,
-						     dst_reg, src_reg);
-			if (rc == -EACCES && env->allow_ptr_leaks) {
-				/* unknown scalar += scalar */
-				__mark_reg_unknown(dst_reg);
-				return adjust_scalar_min_max_vals(
-						env, insn, dst_reg, *src_reg);
-			}
-			return rc;
+			return adjust_ptr_min_max_vals(env, insn,
+						       dst_reg, src_reg);
 		}
 	} else {
 		/* Pretend the src is a reg with a known value, since we only
@@ -2283,17 +2338,9 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 		off_reg.type = SCALAR_VALUE;
 		__mark_reg_known(&off_reg, insn->imm);
 		src_reg = &off_reg;
-		if (ptr_reg) { /* pointer += K */
-			rc = adjust_ptr_min_max_vals(env, insn,
-						     ptr_reg, src_reg);
-			if (rc == -EACCES && env->allow_ptr_leaks) {
-				/* unknown scalar += K */
-				__mark_reg_unknown(dst_reg);
-				return adjust_scalar_min_max_vals(
-						env, insn, dst_reg, off_reg);
-			}
-			return rc;
-		}
+		if (ptr_reg) /* pointer += K */
+			return adjust_ptr_min_max_vals(env, insn,
+						       ptr_reg, src_reg);
 	}
 
 	/* Got here implies adding two SCALAR_VALUEs */
@@ -2390,17 +2437,20 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 					return -EACCES;
 				}
 				mark_reg_unknown(env, regs, insn->dst_reg);
-				/* high 32 bits are known zero. */
-				regs[insn->dst_reg].var_off = tnum_cast(
-						regs[insn->dst_reg].var_off, 4);
-				__update_reg_bounds(&regs[insn->dst_reg]);
+				coerce_reg_to_size(&regs[insn->dst_reg], 4);
 			}
 		} else {
 			/* case: R = imm
 			 * remember the value we stored into this reg
 			 */
 			regs[insn->dst_reg].type = SCALAR_VALUE;
-			__mark_reg_known(regs + insn->dst_reg, insn->imm);
+			if (BPF_CLASS(insn->code) == BPF_ALU64) {
+				__mark_reg_known(regs + insn->dst_reg,
+						 insn->imm);
+			} else {
+				__mark_reg_known(regs + insn->dst_reg,
+						 (u32)insn->imm);
+			}
 		}
 
 	} else if (opcode > BPF_END) {
@@ -3431,15 +3481,14 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
 			return range_within(rold, rcur) &&
 			       tnum_in(rold->var_off, rcur->var_off);
 		} else {
-			/* if we knew anything about the old value, we're not
-			 * equal, because we can't know anything about the
-			 * scalar value of the pointer in the new value.
+			/* We're trying to use a pointer in place of a scalar.
+			 * Even if the scalar was unbounded, this could lead to
+			 * pointer leaks because scalars are allowed to leak
+			 * while pointers are not. We could make this safe in
+			 * special cases if root is calling us, but it's
+			 * probably not worth the hassle.
 			 */
-			return rold->umin_value == 0 &&
-			       rold->umax_value == U64_MAX &&
-			       rold->smin_value == S64_MIN &&
-			       rold->smax_value == S64_MAX &&
-			       tnum_is_unknown(rold->var_off);
+			return false;
 		}
 	case PTR_TO_MAP_VALUE:
 		/* If the new min/max/var_off satisfy the old ones and
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 41376c3ac93b06c8163d6d764fc9a015bafdcdb4..53f7dc65f9a3b917c1c347834257cf26521eff95 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -80,19 +80,19 @@ static struct lockdep_map cpuhp_state_down_map =
 	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
 
 
-static void inline cpuhp_lock_acquire(bool bringup)
+static inline void cpuhp_lock_acquire(bool bringup)
 {
 	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
 }
 
-static void inline cpuhp_lock_release(bool bringup)
+static inline void cpuhp_lock_release(bool bringup)
 {
 	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
 }
 #else
 
-static void inline cpuhp_lock_acquire(bool bringup) { }
-static void inline cpuhp_lock_release(bool bringup) { }
+static inline void cpuhp_lock_acquire(bool bringup) { }
+static inline void cpuhp_lock_release(bool bringup) { }
 
 #endif
 
@@ -1277,9 +1277,9 @@ static struct cpuhp_step cpuhp_bp_states[] = {
 	 * before blk_mq_queue_reinit_notify() from notify_dead(),
 	 * otherwise a RCU stall occurs.
 	 */
-	[CPUHP_TIMERS_DEAD] = {
+	[CPUHP_TIMERS_PREPARE] = {
 		.name			= "timers:dead",
-		.startup.single		= NULL,
+		.startup.single		= timers_prepare_cpu,
 		.teardown.single	= timers_dead_cpu,
 	},
 	/* Kicks the plugged cpu into life */
diff --git a/kernel/fork.c b/kernel/fork.c
index 432eadf6b58c18d9de6a3d09f3fef36089b4b5a2..2295fc69717f6c3d877ef3cac15b55336d7746c6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -721,8 +721,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
 			goto out;
 	}
 	/* a new mm has just been created */
-	arch_dup_mmap(oldmm, mm);
-	retval = 0;
+	retval = arch_dup_mmap(oldmm, mm);
 out:
 	up_write(&mm->mmap_sem);
 	flush_tlb_mm(oldmm);
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h
index 17f05ef8f575f996f87e614a914bbe21d537f949..e4d3819a91cc7d7bda5416bfd5aaba99a5576cce 100644
--- a/kernel/irq/debug.h
+++ b/kernel/irq/debug.h
@@ -12,6 +12,11 @@
 
 static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 {
+	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
+
+	if (!__ratelimit(&ratelimit))
+		return;
+
 	printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
 		irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
 	printk("->handle_irq():  %p, ", desc->handle_irq);
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
index 7f608ac3965379fc112d85051fd80325b2ec84ce..acfaaef8672ad2b1d2419bd7522f5556ebb1ae50 100644
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@ -113,6 +113,7 @@ static const struct irq_bit_descr irqdata_states[] = {
 	BIT_MASK_DESCR(IRQD_SETAFFINITY_PENDING),
 	BIT_MASK_DESCR(IRQD_AFFINITY_MANAGED),
 	BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
+	BIT_MASK_DESCR(IRQD_CAN_RESERVE),
 
 	BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU),
 
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index c26c5bb6b491f75f76f1190cdc21989f79d17e09..508c03dfef254b9dc8c3a7a1e3e7307d3a7201b3 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -364,10 +364,11 @@ irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
 EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);
 
 /*
- * Separate lockdep class for interrupt chip which can nest irq_desc
- * lock.
+ * Separate lockdep classes for interrupt chip which can nest irq_desc
+ * lock and request mutex.
  */
 static struct lock_class_key irq_nested_lock_class;
+static struct lock_class_key irq_nested_request_class;
 
 /*
  * irq_map_generic_chip - Map a generic chip for an irq domain
@@ -409,7 +410,8 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
 	set_bit(idx, &gc->installed);
 
 	if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
-		irq_set_lockdep_class(virq, &irq_nested_lock_class);
+		irq_set_lockdep_class(virq, &irq_nested_lock_class,
+				      &irq_nested_request_class);
 
 	if (chip->irq_calc_mask)
 		chip->irq_calc_mask(data);
@@ -479,7 +481,8 @@ void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
 			continue;
 
 		if (flags & IRQ_GC_INIT_NESTED_LOCK)
-			irq_set_lockdep_class(i, &irq_nested_lock_class);
+			irq_set_lockdep_class(i, &irq_nested_lock_class,
+					      &irq_nested_request_class);
 
 		if (!(flags & IRQ_GC_NO_MASK)) {
 			struct irq_data *d = irq_get_irq_data(i);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 07d08ca701ec4627b558d0435c54cf6de7e147d2..ab19371eab9b8e1e9a7789b882ed4a308883b555 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -440,7 +440,7 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
 #endif /* !CONFIG_GENERIC_PENDING_IRQ */
 
 #if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
-static inline int irq_domain_activate_irq(struct irq_data *data, bool early)
+static inline int irq_domain_activate_irq(struct irq_data *data, bool reserve)
 {
 	irqd_set_activated(data);
 	return 0;
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 4f4f60015e8ab4196ef1df5107f04beda37d4c6c..62068ad46930dd12088081df5233dffdfee0d7f4 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1693,7 +1693,7 @@ static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
 	}
 }
 
-static int __irq_domain_activate_irq(struct irq_data *irqd, bool early)
+static int __irq_domain_activate_irq(struct irq_data *irqd, bool reserve)
 {
 	int ret = 0;
 
@@ -1702,9 +1702,9 @@ static int __irq_domain_activate_irq(struct irq_data *irqd, bool early)
 
 		if (irqd->parent_data)
 			ret = __irq_domain_activate_irq(irqd->parent_data,
-							early);
+							reserve);
 		if (!ret && domain->ops->activate) {
-			ret = domain->ops->activate(domain, irqd, early);
+			ret = domain->ops->activate(domain, irqd, reserve);
 			/* Rollback in case of error */
 			if (ret && irqd->parent_data)
 				__irq_domain_deactivate_irq(irqd->parent_data);
@@ -1716,17 +1716,18 @@ static int __irq_domain_activate_irq(struct irq_data *irqd, bool early)
 /**
  * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
  *			     interrupt
- * @irq_data:	outermost irq_data associated with interrupt
+ * @irq_data:	Outermost irq_data associated with interrupt
+ * @reserve:	If set, only reserve an interrupt vector instead of assigning one
  *
  * This is the second step to call domain_ops->activate to program interrupt
  * controllers, so the interrupt could actually get delivered.
  */
-int irq_domain_activate_irq(struct irq_data *irq_data, bool early)
+int irq_domain_activate_irq(struct irq_data *irq_data, bool reserve)
 {
 	int ret = 0;
 
 	if (!irqd_is_activated(irq_data))
-		ret = __irq_domain_activate_irq(irq_data, early);
+		ret = __irq_domain_activate_irq(irq_data, reserve);
 	if (!ret)
 		irqd_set_activated(irq_data);
 	return ret;
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index edb987b2c58dc1553342b5a87c91335b42888e8b..2f3c4f5382cc6bad8daf528146cf613d9a1ac3e6 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -339,6 +339,40 @@ int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
 	return ret;
 }
 
+/*
+ * Carefully check whether the device can use reservation mode. If
+ * reservation mode is enabled then the early activation will assign a
+ * dummy vector to the device. If the PCI/MSI device does not support
+ * masking of the entry then this can result in spurious interrupts when
+ * the device driver is not absolutely careful. But even then a malfunction
+ * of the hardware could result in a spurious interrupt on the dummy vector
+ * and render the device unusable. If the entry can be masked then the core
+ * logic will prevent the spurious interrupt and reservation mode can be
+ * used. For now reservation mode is restricted to PCI/MSI.
+ */
+static bool msi_check_reservation_mode(struct irq_domain *domain,
+				       struct msi_domain_info *info,
+				       struct device *dev)
+{
+	struct msi_desc *desc;
+
+	if (domain->bus_token != DOMAIN_BUS_PCI_MSI)
+		return false;
+
+	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
+		return false;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
+		return false;
+
+	/*
+	 * Checking the first MSI descriptor is sufficient. MSIX supports
+	 * masking and MSI does so when the maskbit is set.
+	 */
+	desc = first_msi_entry(dev);
+	return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit;
+}
+
 /**
  * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
  * @domain:	The domain to allocate from
@@ -353,9 +387,11 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
 {
 	struct msi_domain_info *info = domain->host_data;
 	struct msi_domain_ops *ops = info->ops;
-	msi_alloc_info_t arg;
+	struct irq_data *irq_data;
 	struct msi_desc *desc;
+	msi_alloc_info_t arg;
 	int i, ret, virq;
+	bool can_reserve;
 
 	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
 	if (ret)
@@ -385,6 +421,8 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
 	if (ops->msi_finish)
 		ops->msi_finish(&arg, 0);
 
+	can_reserve = msi_check_reservation_mode(domain, info, dev);
+
 	for_each_msi_entry(desc, dev) {
 		virq = desc->irq;
 		if (desc->nvec_used == 1)
@@ -397,15 +435,25 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
 		 * the MSI entries before the PCI layer enables MSI in the
 		 * card. Otherwise the card latches a random msi message.
 		 */
-		if (info->flags & MSI_FLAG_ACTIVATE_EARLY) {
-			struct irq_data *irq_data;
+		if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
+			continue;
 
+		irq_data = irq_domain_get_irq_data(domain, desc->irq);
+		if (!can_reserve)
+			irqd_clr_can_reserve(irq_data);
+		ret = irq_domain_activate_irq(irq_data, can_reserve);
+		if (ret)
+			goto cleanup;
+	}
+
+	/*
+	 * If these interrupts use reservation mode, clear the activated bit
+	 * so request_irq() will assign the final vector.
+	 */
+	if (can_reserve) {
+		for_each_msi_entry(desc, dev) {
 			irq_data = irq_domain_get_irq_data(domain, desc->irq);
-			ret = irq_domain_activate_irq(irq_data, true);
-			if (ret)
-				goto cleanup;
-			if (info->flags & MSI_FLAG_MUST_REACTIVATE)
-				irqd_clr_activated(irq_data);
+			irqd_clr_activated(irq_data);
 		}
 	}
 	return 0;
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 2f52ec0f1539fce62a099d9fa559df334a19869c..d6717a3331a1b21bd1be02f034596ab293e8ceac 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -244,7 +244,7 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
 #ifdef CONFIG_NO_HZ_COMMON
 static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
 {
-	unsigned long idle_calls = tick_nohz_get_idle_calls();
+	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
 	bool ret = idle_calls == sg_cpu->saved_idle_calls;
 
 	sg_cpu->saved_idle_calls = idle_calls;
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index e776fc8cc1df3bc6dd09e7722250f87f183fb5ef..f6b5f19223d6cb78efb3179f6dc0fd15383aeded 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -95,6 +95,7 @@ config NO_HZ_FULL
 	select RCU_NOCB_CPU
 	select VIRT_CPU_ACCOUNTING_GEN
 	select IRQ_WORK
+	select CPU_ISOLATION
 	help
 	 Adaptively try to shutdown the tick whenever possible, even when
 	 the CPU is running tasks. Typically this requires running a single
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 99578f06c8d4fe57cb15cdd44fd58865cae8d0f3..f7cc7abfcf252f0cb6fadb3b9437b9fd096b8baf 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -650,6 +650,11 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 	ts->next_tick = 0;
 }
 
+static inline bool local_timer_softirq_pending(void)
+{
+	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
+}
+
 static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 					 ktime_t now, int cpu)
 {
@@ -666,8 +671,18 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	} while (read_seqretry(&jiffies_lock, seq));
 	ts->last_jiffies = basejiff;
 
-	if (rcu_needs_cpu(basemono, &next_rcu) ||
-	    arch_needs_cpu() || irq_work_needs_cpu()) {
+	/*
+	 * Keep the periodic tick when RCU, the architecture or irq_work
+	 * requests it.
+	 * Aside from that, check whether the local timer softirq is
+	 * pending. If so, it's a bad idea to call get_next_timer_interrupt()
+	 * because there is an already expired timer, so it will request
+	 * immediate expiry, which rearms the hardware timer with a
+	 * minimal delta which brings us back to this place
+	 * immediately. Lather, rinse and repeat...
+	 */
+	if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
+	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
 		next_tick = basemono + TICK_NSEC;
 	} else {
 		/*
@@ -985,6 +1000,19 @@ ktime_t tick_nohz_get_sleep_length(void)
 	return ts->sleep_length;
 }
 
+/**
+ * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
+ * for a particular CPU.
+ *
+ * Called from the schedutil frequency scaling governor in scheduler context.
+ */
+unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
+{
+	struct tick_sched *ts = tick_get_tick_sched(cpu);
+
+	return ts->idle_calls;
+}
+
 /**
  * tick_nohz_get_idle_calls - return the current idle calls counter value
  *
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index ffebcf878fba5d5cf67f5e9abcece25c16259919..89a9e1b4264a07a9a6d69e37759c27ced8b4de37 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -823,11 +823,10 @@ static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
 	struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
 
 	/*
-	 * If the timer is deferrable and nohz is active then we need to use
-	 * the deferrable base.
+	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
+	 * to use the deferrable base.
 	 */
-	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
-	    (tflags & TIMER_DEFERRABLE))
+	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
 		base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
 	return base;
 }
@@ -837,11 +836,10 @@ static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
 	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 
 	/*
-	 * If the timer is deferrable and nohz is active then we need to use
-	 * the deferrable base.
+	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
+	 * to use the deferrable base.
 	 */
-	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
-	    (tflags & TIMER_DEFERRABLE))
+	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
 		base = this_cpu_ptr(&timer_bases[BASE_DEF]);
 	return base;
 }
@@ -1009,8 +1007,6 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
 	if (!ret && (options & MOD_TIMER_PENDING_ONLY))
 		goto out_unlock;
 
-	debug_activate(timer, expires);
-
 	new_base = get_target_base(base, timer->flags);
 
 	if (base != new_base) {
@@ -1034,6 +1030,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option
 		}
 	}
 
+	debug_activate(timer, expires);
+
 	timer->expires = expires;
 	/*
 	 * If 'idx' was calculated above and the base time did not advance
@@ -1684,7 +1682,7 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
 	base->must_forward_clk = false;
 
 	__run_timers(base);
-	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
+	if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
 		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
 }
 
@@ -1855,6 +1853,21 @@ static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *h
 	}
 }
 
+int timers_prepare_cpu(unsigned int cpu)
+{
+	struct timer_base *base;
+	int b;
+
+	for (b = 0; b < NR_BASES; b++) {
+		base = per_cpu_ptr(&timer_bases[b], cpu);
+		base->clk = jiffies;
+		base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
+		base->is_idle = false;
+		base->must_forward_clk = true;
+	}
+	return 0;
+}
+
 int timers_dead_cpu(unsigned int cpu)
 {
 	struct timer_base *old_base;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index c87766c1c20446de2d191edda885669d447adecc..9ab18995ff1ebeea0e862f48b43f64924c87e127 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -280,6 +280,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 /* Missed count stored at end */
 #define RB_MISSED_STORED	(1 << 30)
 
+#define RB_MISSED_FLAGS		(RB_MISSED_EVENTS|RB_MISSED_STORED)
+
 struct buffer_data_page {
 	u64		 time_stamp;	/* page time stamp */
 	local_t		 commit;	/* write committed index */
@@ -331,7 +333,9 @@ static void rb_init_page(struct buffer_data_page *bpage)
  */
 size_t ring_buffer_page_len(void *page)
 {
-	return local_read(&((struct buffer_data_page *)page)->commit)
+	struct buffer_data_page *bpage = page;
+
+	return (local_read(&bpage->commit) & ~RB_MISSED_FLAGS)
 		+ BUF_PAGE_HDR_SIZE;
 }
 
@@ -4400,8 +4404,13 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct buffer_data_page *bpage = data;
+	struct page *page = virt_to_page(bpage);
 	unsigned long flags;
 
+	/* If the page is still in use someplace else, we can't reuse it */
+	if (page_ref_count(page) > 1)
+		goto out;
+
 	local_irq_save(flags);
 	arch_spin_lock(&cpu_buffer->lock);
 
@@ -4413,6 +4422,7 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
 	arch_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);
 
+ out:
 	free_page((unsigned long)bpage);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 59518b8126d04b4f1f62a526571490dda8398e3b..2a8d8a294345a258baca50b8a6b272c1ac0fc658 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6769,7 +6769,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		.spd_release	= buffer_spd_release,
 	};
 	struct buffer_ref *ref;
-	int entries, size, i;
+	int entries, i;
 	ssize_t ret = 0;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -6823,14 +6823,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 			break;
 		}
 
-		/*
-		 * zero out any left over data, this is going to
-		 * user land.
-		 */
-		size = ring_buffer_page_len(ref->page);
-		if (size < PAGE_SIZE)
-			memset(ref->page + size, 0, PAGE_SIZE - size);
-
 		page = virt_to_page(ref->page);
 
 		spd.pages[i] = page;
@@ -7588,6 +7580,7 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
 	buf->data = alloc_percpu(struct trace_array_cpu);
 	if (!buf->data) {
 		ring_buffer_free(buf->buffer);
+		buf->buffer = NULL;
 		return -ENOMEM;
 	}
 
@@ -7611,7 +7604,9 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
 				    allocate_snapshot ? size : 1);
 	if (WARN_ON(ret)) {
 		ring_buffer_free(tr->trace_buffer.buffer);
+		tr->trace_buffer.buffer = NULL;
 		free_percpu(tr->trace_buffer.data);
+		tr->trace_buffer.data = NULL;
 		return -ENOMEM;
 	}
 	tr->allocated_snapshot = allocate_snapshot;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index c3e84edc47c965d40199b652ba78876cdaa9c70c..2615074d3de5c63e63da31995adc4a1f07f7e9fd 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -346,7 +346,8 @@ static int kobject_uevent_net_broadcast(struct kobject *kobj,
 static void zap_modalias_env(struct kobj_uevent_env *env)
 {
 	static const char modalias_prefix[] = "MODALIAS=";
-	int i;
+	size_t len;
+	int i, j;
 
 	for (i = 0; i < env->envp_idx;) {
 		if (strncmp(env->envp[i], modalias_prefix,
@@ -355,11 +356,18 @@ static void zap_modalias_env(struct kobj_uevent_env *env)
 			continue;
 		}
 
-		if (i != env->envp_idx - 1)
-			memmove(&env->envp[i], &env->envp[i + 1],
-				sizeof(env->envp[i]) * env->envp_idx - 1);
+		len = strlen(env->envp[i]) + 1;
+
+		if (i != env->envp_idx - 1) {
+			memmove(env->envp[i], env->envp[i + 1],
+				env->buflen - len);
+
+			for (j = i; j < env->envp_idx - 1; j++)
+				env->envp[j] = env->envp[j + 1] - len;
+		}
 
 		env->envp_idx--;
+		env->buflen -= len;
 	}
 }
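
The zap_modalias_env() fix matters because env->envp[] entries are pointers into the single packed env->buf, so dropping a string means compacting the buffer and rebasing every pointer that followed the hole; the old code instead memmove'd the pointer array with a miscomputed size. A simplified userspace sketch of the corrected idea (types and sizes are illustrative, and the tail length is computed explicitly here):

#include <stdio.h>
#include <string.h>

#define MAXENV	8

struct env {
	char buf[256];		/* packed NUL-terminated strings */
	size_t buflen;		/* bytes used in buf */
	char *envp[MAXENV];	/* pointers into buf */
	int envp_idx;
};

static void drop_entry(struct env *env, int i)
{
	size_t len = strlen(env->envp[i]) + 1;

	if (i != env->envp_idx - 1) {
		/* bytes of packed strings that live after the dropped one */
		size_t tail = env->buflen - (size_t)(env->envp[i + 1] - env->buf);

		/* close the gap in the string buffer itself ... */
		memmove(env->envp[i], env->envp[i + 1], tail);
		/* ... and rebase the pointers that pointed past the hole */
		for (int j = i; j < env->envp_idx - 1; j++)
			env->envp[j] = env->envp[j + 1] - len;
	}
	env->envp_idx--;
	env->buflen -= len;
}

int main(void)
{
	struct env e = { .buflen = 0, .envp_idx = 0 };
	const char *init[] = { "ACTION=add", "MODALIAS=usb:v1D6B", "SEQNUM=7" };

	for (int i = 0; i < 3; i++) {
		e.envp[e.envp_idx++] = e.buf + e.buflen;
		strcpy(e.buf + e.buflen, init[i]);
		e.buflen += strlen(init[i]) + 1;
	}

	drop_entry(&e, 1);			/* drop the MODALIAS entry */
	for (int i = 0; i < e.envp_idx; i++)
		printf("%s\n", e.envp[i]);	/* ACTION=add, SEQNUM=7 */
	return 0;
}
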
 
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index aa8812ae6776ee31712fe88c58da4048ff9c31e4..9e97480892709957e127e9941710ce45f11ff724 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -435,6 +435,41 @@ loop:
 	return 0;
 }
 
+static int bpf_fill_ld_abs_vlan_push_pop2(struct bpf_test *self)
+{
+	struct bpf_insn *insn;
+
+	insn = kmalloc_array(16, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	/* Due to func address being non-const, we need to
+	 * assemble this here.
+	 */
+	insn[0] = BPF_MOV64_REG(R6, R1);
+	insn[1] = BPF_LD_ABS(BPF_B, 0);
+	insn[2] = BPF_LD_ABS(BPF_H, 0);
+	insn[3] = BPF_LD_ABS(BPF_W, 0);
+	insn[4] = BPF_MOV64_REG(R7, R6);
+	insn[5] = BPF_MOV64_IMM(R6, 0);
+	insn[6] = BPF_MOV64_REG(R1, R7);
+	insn[7] = BPF_MOV64_IMM(R2, 1);
+	insn[8] = BPF_MOV64_IMM(R3, 2);
+	insn[9] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+			       bpf_skb_vlan_push_proto.func - __bpf_call_base);
+	insn[10] = BPF_MOV64_REG(R6, R7);
+	insn[11] = BPF_LD_ABS(BPF_B, 0);
+	insn[12] = BPF_LD_ABS(BPF_H, 0);
+	insn[13] = BPF_LD_ABS(BPF_W, 0);
+	insn[14] = BPF_MOV64_IMM(R0, 42);
+	insn[15] = BPF_EXIT_INSN();
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = 16;
+
+	return 0;
+}
+
 static int bpf_fill_jump_around_ld_abs(struct bpf_test *self)
 {
 	unsigned int len = BPF_MAXINSNS;
@@ -6066,6 +6101,14 @@ static struct bpf_test tests[] = {
 		{},
 		{ {0x1, 0x42 } },
 	},
+	{
+		"LD_ABS with helper changing skb data",
+		{ },
+		INTERNAL,
+		{ 0x34 },
+		{ { ETH_HLEN, 42 } },
+		.fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
+	},
 };
 
 static struct net_device dev;
diff --git a/lib/timerqueue.c b/lib/timerqueue.c
index 4a720ed4fdafd575df46159a3d51131902d638e4..0d54bcbc8170c7754aef0487eb10b8f7b5773103 100644
--- a/lib/timerqueue.c
+++ b/lib/timerqueue.c
@@ -33,8 +33,9 @@
  * @head: head of timerqueue
  * @node: timer node to be added
  *
- * Adds the timer node to the timerqueue, sorted by the
- * node's expires value.
+ * Adds the timer node to the timerqueue, sorted by the node's expires
+ * value. Returns true if the newly added timer is the first expiring timer in
+ * the queue.
  */
 bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
 {
@@ -70,7 +71,8 @@ EXPORT_SYMBOL_GPL(timerqueue_add);
  * @head: head of timerqueue
  * @node: timer node to be removed
  *
- * Removes the timer node from the timerqueue.
+ * Removes the timer node from the timerqueue. Returns true if the queue is
+ * not empty after the removal.
  */
 bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
 {
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 84b2dc76f140e922e2ed0d7c4d545b4d4ddf496d..b5f940ce0143ba061a183db0df3ef0dc17f57c72 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -882,13 +882,10 @@ int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
 	if (IS_ERR(dev))
 		return PTR_ERR(dev);
 
-	if (bdi_debug_register(bdi, dev_name(dev))) {
-		device_destroy(bdi_class, dev->devt);
-		return -ENOMEM;
-	}
 	cgwb_bdi_register(bdi);
 	bdi->dev = dev;
 
+	bdi_debug_register(bdi, dev_name(dev));
 	set_bit(WB_registered, &bdi->wb.state);
 
 	spin_lock_bh(&bdi_lock);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index d0ef0a8e8831920cb86fc767eb0d756bf89feb46..015f465c514b28564c9e91eec40dc041b765fe25 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1262,19 +1262,20 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
 	struct net_bridge *br = netdev_priv(dev);
 	int err;
 
+	err = register_netdevice(dev);
+	if (err)
+		return err;
+
 	if (tb[IFLA_ADDRESS]) {
 		spin_lock_bh(&br->lock);
 		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
 		spin_unlock_bh(&br->lock);
 	}
 
-	err = register_netdevice(dev);
-	if (err)
-		return err;
-
 	err = br_changelink(dev, tb, data, extack);
 	if (err)
-		unregister_netdevice(dev);
+		br_dev_delete(dev, NULL);
+
 	return err;
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index f47e96b623088ae354947787ef17217cd32ae64e..01ee854454a8089cdd49e2c8964a99f6a2d74730 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3904,7 +3904,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 				     hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
 				     troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
 			goto do_drop;
-		if (troom > 0 && __skb_linearize(skb))
+		if (skb_linearize(skb))
 			goto do_drop;
 	}
 
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index b797832565d34ccefb374ee87f9cbc460779a484..60a71be75aea063b418a48ade2a1e1c7804ab35c 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -267,7 +267,7 @@ struct net *get_net_ns_by_id(struct net *net, int id)
 	spin_lock_bh(&net->nsid_lock);
 	peer = idr_find(&net->netns_ids, id);
 	if (peer)
-		get_net(peer);
+		peer = maybe_get_net(peer);
 	spin_unlock_bh(&net->nsid_lock);
 	rcu_read_unlock();
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a592ca025fc46bf4ff26c900d273b1ae12afa334..08f57408131523adec3cc36044952e14aa5a6b9a 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1177,12 +1177,12 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 	int i, new_frags;
 	u32 d_off;
 
-	if (!num_frags)
-		return 0;
-
 	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
 		return -EINVAL;
 
+	if (!num_frags)
+		goto release;
+
 	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	for (i = 0; i < new_frags; i++) {
 		page = alloc_page(gfp_mask);
@@ -1238,6 +1238,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 	__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
 	skb_shinfo(skb)->nr_frags = new_frags;
 
+release:
 	skb_zcopy_clear(skb, false);
 	return 0;
 }
@@ -3654,8 +3655,6 @@ normal:
 
 		skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
 					      SKBTX_SHARED_FRAG;
-		if (skb_zerocopy_clone(nskb, head_skb, GFP_ATOMIC))
-			goto err;
 
 		while (pos < offset + len) {
 			if (i >= nfrags) {
@@ -3681,6 +3680,8 @@ normal:
 
 			if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
 				goto err;
+			if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
+				goto err;
 
 			*nskb_frag = *frag;
 			__skb_frag_ref(nskb_frag);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index f52d27a422c37298b2ad0c1dbba0e5307f1a46b6..08259d078b1ca821c581aeb34251c79a9aba8c8d 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1298,14 +1298,19 @@ err_table_hash_alloc:
 
 static void ip_fib_net_exit(struct net *net)
 {
-	unsigned int i;
+	int i;
 
 	rtnl_lock();
 #ifdef CONFIG_IP_MULTIPLE_TABLES
 	RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
 	RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
 #endif
-	for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
+	/* Destroy the tables in reverse order to guarantee that the
+	 * local table, ID 255, is destroyed before the main table, ID
+	 * 254. This is necessary as the local table may contain
+	 * references to data contained in the main table.
+	 */
+	for (i = FIB_TABLE_HASHSZ - 1; i >= 0; i--) {
 		struct hlist_head *head = &net->ipv4.fib_table_hash[i];
 		struct hlist_node *tmp;
 		struct fib_table *tb;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index f04d944f8abe0bfbb840837bb35d28fe6d8d25d0..c586597da20dbb0e46eb0f693fd65bccfc8f3633 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -698,7 +698,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
 
 	nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
 		int type = nla_type(nla);
-		u32 val;
+		u32 fi_val, val;
 
 		if (!type)
 			continue;
@@ -715,7 +715,11 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
 			val = nla_get_u32(nla);
 		}
 
-		if (fi->fib_metrics->metrics[type - 1] != val)
+		fi_val = fi->fib_metrics->metrics[type - 1];
+		if (type == RTAX_FEATURES)
+			fi_val &= ~DST_FEATURE_ECN_CA;
+
+		if (fi_val != val)
 			return false;
 	}
 
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 9c1735632c8c43cc0607d54403571362895d91e2..45ffd3d045d240cad8e4d0ed8dd0dd7da997bf9e 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1310,6 +1310,7 @@ static const struct net_device_ops erspan_netdev_ops = {
 static void ipgre_tap_setup(struct net_device *dev)
 {
 	ether_setup(dev);
+	dev->max_mtu = 0;
 	dev->netdev_ops	= &gre_tap_netdev_ops;
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index e50b7fea57ee35c117002463f473ccd41face358..bcfc00e88756dabb1f491d3d41137ccbc7ab1cbc 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -23,6 +23,12 @@ int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb)
 	return xfrm4_extract_header(skb);
 }
 
+static int xfrm4_rcv_encap_finish2(struct net *net, struct sock *sk,
+				   struct sk_buff *skb)
+{
+	return dst_input(skb);
+}
+
 static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk,
 					 struct sk_buff *skb)
 {
@@ -33,7 +39,11 @@ static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk,
 					 iph->tos, skb->dev))
 			goto drop;
 	}
-	return dst_input(skb);
+
+	if (xfrm_trans_queue(skb, xfrm4_rcv_encap_finish2))
+		goto drop;
+
+	return 0;
 drop:
 	kfree_skb(skb);
 	return NET_RX_DROP;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index c26f71234b9c01a82ec9d40423ee28957468ed46..c9441ca4539936486291147a47a84ef1b2ecf095 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -210,7 +210,6 @@ lookup_protocol:
 	np->mcast_hops	= IPV6_DEFAULT_MCASTHOPS;
 	np->mc_loop	= 1;
 	np->pmtudisc	= IPV6_PMTUDISC_WANT;
-	np->autoflowlabel = ip6_default_np_autolabel(net);
 	np->repflow	= net->ipv6.sysctl.flowlabel_reflect;
 	sk->sk_ipv6only	= net->ipv6.sysctl.bindv6only;
 
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 4cfd8e0696fe77f6d7af7ca3579a2418aef972f6..772695960890893f9ab7862cf64728b783f5bb96 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1014,6 +1014,36 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
 	eth_random_addr(dev->perm_addr);
 }
 
+#define GRE6_FEATURES (NETIF_F_SG |		\
+		       NETIF_F_FRAGLIST |	\
+		       NETIF_F_HIGHDMA |	\
+		       NETIF_F_HW_CSUM)
+
+static void ip6gre_tnl_init_features(struct net_device *dev)
+{
+	struct ip6_tnl *nt = netdev_priv(dev);
+
+	dev->features		|= GRE6_FEATURES;
+	dev->hw_features	|= GRE6_FEATURES;
+
+	if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
+		/* TCP offload with GRE SEQ is not supported, nor
+		 * can we support 2 levels of outer headers requiring
+		 * an update.
+		 */
+		if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
+		    nt->encap.type == TUNNEL_ENCAP_NONE) {
+			dev->features    |= NETIF_F_GSO_SOFTWARE;
+			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+		}
+
+		/* Can use a lockless transmit, unless we generate
+		 * output sequences
+		 */
+		dev->features |= NETIF_F_LLTX;
+	}
+}
+
 static int ip6gre_tunnel_init_common(struct net_device *dev)
 {
 	struct ip6_tnl *tunnel;
@@ -1048,6 +1078,8 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
 	if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 		dev->mtu -= 8;
 
+	ip6gre_tnl_init_features(dev);
+
 	return 0;
 }
 
@@ -1298,16 +1330,12 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
 	.ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
-#define GRE6_FEATURES (NETIF_F_SG |		\
-		       NETIF_F_FRAGLIST |	\
-		       NETIF_F_HIGHDMA |		\
-		       NETIF_F_HW_CSUM)
-
 static void ip6gre_tap_setup(struct net_device *dev)
 {
 
 	ether_setup(dev);
 
+	dev->max_mtu = 0;
 	dev->netdev_ops = &ip6gre_tap_netdev_ops;
 	dev->needs_free_netdev = true;
 	dev->priv_destructor = ip6gre_dev_free;
@@ -1382,26 +1410,6 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
 	nt->net = dev_net(dev);
 	ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
 
-	dev->features		|= GRE6_FEATURES;
-	dev->hw_features	|= GRE6_FEATURES;
-
-	if (!(nt->parms.o_flags & TUNNEL_SEQ)) {
-		/* TCP offload with GRE SEQ is not supported, nor
-		 * can we support 2 levels of outer headers requiring
-		 * an update.
-		 */
-		if (!(nt->parms.o_flags & TUNNEL_CSUM) ||
-		    (nt->encap.type == TUNNEL_ENCAP_NONE)) {
-			dev->features    |= NETIF_F_GSO_SOFTWARE;
-			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
-		}
-
-		/* Can use a lockless transmit, unless we generate
-		 * output sequences
-		 */
-		dev->features |= NETIF_F_LLTX;
-	}
-
 	err = register_netdevice(dev);
 	if (err)
 		goto out;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 5110a418cc4d0c1040506394460cb482698d8c15..f7dd51c4231415fd1321fd431194d896ea2d1689 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -166,6 +166,14 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
 
+static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
+{
+	if (!np->autoflowlabel_set)
+		return ip6_default_np_autolabel(net);
+	else
+		return np->autoflowlabel;
+}
+
 /*
  * xmit an sk_buff (used by TCP, SCTP and DCCP)
  * Note : socket lock is not held for SYNACK packets, but might be modified
@@ -230,7 +238,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 		hlimit = ip6_dst_hoplimit(dst);
 
 	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
-						     np->autoflowlabel, fl6));
+				ip6_autoflowlabel(net, np), fl6));
 
 	hdr->payload_len = htons(seg_len);
 	hdr->nexthdr = proto;
@@ -1626,7 +1634,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
 
 	ip6_flow_hdr(hdr, v6_cork->tclass,
 		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
-					np->autoflowlabel, fl6));
+					ip6_autoflowlabel(net, np), fl6));
 	hdr->hop_limit = v6_cork->hop_limit;
 	hdr->nexthdr = proto;
 	hdr->saddr = fl6->saddr;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index db84f523656ddf876e1971c416ee03a6a1794d9d..931c38f6ff4a42fb17cf129cf6035706a24176dc 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1123,8 +1123,13 @@ route_lookup:
 		max_headroom += 8;
 		mtu -= 8;
 	}
-	if (mtu < IPV6_MIN_MTU)
-		mtu = IPV6_MIN_MTU;
+	if (skb->protocol == htons(ETH_P_IPV6)) {
+		if (mtu < IPV6_MIN_MTU)
+			mtu = IPV6_MIN_MTU;
+	} else if (mtu < 576) {
+		mtu = 576;
+	}
+
 	if (skb_dst(skb) && !t->parms.collect_md)
 		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
 	if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index b9404feabd7857fe0873fbc4f346d281f3600807..2d4680e0376f41deee6c999eadaf9409353e0b4a 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -886,6 +886,7 @@ pref_skip_coa:
 		break;
 	case IPV6_AUTOFLOWLABEL:
 		np->autoflowlabel = valbool;
+		np->autoflowlabel_set = 1;
 		retv = 0;
 		break;
 	case IPV6_RECVFRAGSIZE:
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7a8d1500d374b4089e623ed2b20d68110cff498e..0458b761f3c56ce765841e0a3a7e5e78f90b95eb 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2336,6 +2336,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 	}
 
 	rt->dst.flags |= DST_HOST;
+	rt->dst.input = ip6_input;
 	rt->dst.output  = ip6_output;
 	rt->rt6i_gateway  = fl6->daddr;
 	rt->rt6i_dst.addr = fl6->daddr;
@@ -4297,19 +4298,13 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 		if (!ipv6_addr_any(&fl6.saddr))
 			flags |= RT6_LOOKUP_F_HAS_SADDR;
 
-		if (!fibmatch)
-			dst = ip6_route_input_lookup(net, dev, &fl6, flags);
-		else
-			dst = ip6_route_lookup(net, &fl6, 0);
+		dst = ip6_route_input_lookup(net, dev, &fl6, flags);
 
 		rcu_read_unlock();
 	} else {
 		fl6.flowi6_oif = oif;
 
-		if (!fibmatch)
-			dst = ip6_route_output(net, NULL, &fl6);
-		else
-			dst = ip6_route_lookup(net, &fl6, 0);
+		dst = ip6_route_output(net, NULL, &fl6);
 	}
 
 
@@ -4326,6 +4321,15 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 		goto errout;
 	}
 
+	if (fibmatch && rt->dst.from) {
+		struct rt6_info *ort = container_of(rt->dst.from,
+						    struct rt6_info, dst);
+
+		dst_hold(&ort->dst);
+		ip6_rt_put(rt);
+		rt = ort;
+	}
+
 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
 	if (!skb) {
 		ip6_rt_put(rt);
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index fe04e23af9862557fde2a9c214faf3a10b7e5eda..841f4a07438e83502eadd6ec6c16a16d1de6aa55 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -32,6 +32,14 @@ int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
 }
 EXPORT_SYMBOL(xfrm6_rcv_spi);
 
+static int xfrm6_transport_finish2(struct net *net, struct sock *sk,
+				   struct sk_buff *skb)
+{
+	if (xfrm_trans_queue(skb, ip6_rcv_finish))
+		__kfree_skb(skb);
+	return -1;
+}
+
 int xfrm6_transport_finish(struct sk_buff *skb, int async)
 {
 	struct xfrm_offload *xo = xfrm_offload(skb);
@@ -56,7 +64,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
 
 	NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
 		dev_net(skb->dev), NULL, skb, skb->dev, NULL,
-		ip6_rcv_finish);
+		xfrm6_transport_finish2);
 	return -1;
 }
 
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index dbe2379329c5517fb164b6024d40fabebe7855c8..f039064ce922f3aac8419dcda65ad875f89e966b 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -579,6 +579,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 			return -EINVAL;
 
 		skb_reset_network_header(skb);
+		key->eth.type = skb->protocol;
 	} else {
 		eth = eth_hdr(skb);
 		ether_addr_copy(key->eth.src, eth->h_source);
@@ -592,15 +593,23 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 		if (unlikely(parse_vlan(skb, key)))
 			return -ENOMEM;
 
-		skb->protocol = parse_ethertype(skb);
-		if (unlikely(skb->protocol == htons(0)))
+		key->eth.type = parse_ethertype(skb);
+		if (unlikely(key->eth.type == htons(0)))
 			return -ENOMEM;
 
+		/* Packets with multiple VLAN tags need to retain the TPID to
+		 * satisfy skb_vlan_pop(), which will later shift the ethertype
+		 * into skb->protocol.
+		 */
+		if (key->eth.cvlan.tci & htons(VLAN_TAG_PRESENT))
+			skb->protocol = key->eth.cvlan.tpid;
+		else
+			skb->protocol = key->eth.type;
+
 		skb_reset_network_header(skb);
 		__skb_push(skb, skb->data - skb_mac_header(skb));
 	}
 	skb_reset_mac_len(skb);
-	key->eth.type = skb->protocol;
 
 	/* Network layer. */
 	if (key->eth.type == htons(ETH_P_IP)) {
diff --git a/net/rds/send.c b/net/rds/send.c
index b52cdc8ae428819a5853509a06852ec2ebc43a5a..f72466c63f0c5657a2f44e11ae432dd1457991cf 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1009,6 +1009,9 @@ static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
 			continue;
 
 		if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
+			if (cmsg->cmsg_len <
+			    CMSG_LEN(sizeof(struct rds_rdma_args)))
+				return -EINVAL;
 			args = CMSG_DATA(cmsg);
 			*rdma_bytes += args->remote_vec.bytes;
 		}
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index b91ea03e3afa717225c00a3d2a03e9d722229fbc..b9d63d2246e667329c30606165dd485b9dc777aa 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -379,6 +379,8 @@ void tcf_block_put(struct tcf_block *block)
 {
 	struct tcf_block_ext_info ei = {0, };
 
+	if (!block)
+		return;
 	tcf_block_put_ext(block, block->q, &ei);
 }
 
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 6fe798c2df1a5303cd61cd3ad53cd2f9385d16de..8d78e7f4ecc33082517aaab5767a30c119f49dc0 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -42,7 +42,6 @@ struct cls_bpf_prog {
 	struct list_head link;
 	struct tcf_result res;
 	bool exts_integrated;
-	bool offloaded;
 	u32 gen_flags;
 	struct tcf_exts exts;
 	u32 handle;
@@ -148,33 +147,37 @@ static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
 }
 
 static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
-			       enum tc_clsbpf_command cmd)
+			       struct cls_bpf_prog *oldprog)
 {
-	bool addorrep = cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE;
 	struct tcf_block *block = tp->chain->block;
-	bool skip_sw = tc_skip_sw(prog->gen_flags);
 	struct tc_cls_bpf_offload cls_bpf = {};
+	struct cls_bpf_prog *obj;
+	bool skip_sw;
 	int err;
 
+	skip_sw = prog && tc_skip_sw(prog->gen_flags);
+	obj = prog ?: oldprog;
+
 	tc_cls_common_offload_init(&cls_bpf.common, tp);
-	cls_bpf.command = cmd;
-	cls_bpf.exts = &prog->exts;
-	cls_bpf.prog = prog->filter;
-	cls_bpf.name = prog->bpf_name;
-	cls_bpf.exts_integrated = prog->exts_integrated;
-	cls_bpf.gen_flags = prog->gen_flags;
+	cls_bpf.command = TC_CLSBPF_OFFLOAD;
+	cls_bpf.exts = &obj->exts;
+	cls_bpf.prog = prog ? prog->filter : NULL;
+	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
+	cls_bpf.name = obj->bpf_name;
+	cls_bpf.exts_integrated = obj->exts_integrated;
+	cls_bpf.gen_flags = obj->gen_flags;
 
 	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
-	if (addorrep) {
+	if (prog) {
 		if (err < 0) {
-			cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
+			cls_bpf_offload_cmd(tp, oldprog, prog);
 			return err;
 		} else if (err > 0) {
 			prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
 		}
 	}
 
-	if (addorrep && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
+	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
 		return -EINVAL;
 
 	return 0;
@@ -183,38 +186,17 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
 static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
 			   struct cls_bpf_prog *oldprog)
 {
-	struct cls_bpf_prog *obj = prog;
-	enum tc_clsbpf_command cmd;
-	bool skip_sw;
-	int ret;
-
-	skip_sw = tc_skip_sw(prog->gen_flags) ||
-		(oldprog && tc_skip_sw(oldprog->gen_flags));
-
-	if (oldprog && oldprog->offloaded) {
-		if (!tc_skip_hw(prog->gen_flags)) {
-			cmd = TC_CLSBPF_REPLACE;
-		} else if (!tc_skip_sw(prog->gen_flags)) {
-			obj = oldprog;
-			cmd = TC_CLSBPF_DESTROY;
-		} else {
-			return -EINVAL;
-		}
-	} else {
-		if (tc_skip_hw(prog->gen_flags))
-			return skip_sw ? -EINVAL : 0;
-		cmd = TC_CLSBPF_ADD;
-	}
-
-	ret = cls_bpf_offload_cmd(tp, obj, cmd);
-	if (ret)
-		return ret;
+	if (prog && oldprog && prog->gen_flags != oldprog->gen_flags)
+		return -EINVAL;
 
-	obj->offloaded = true;
-	if (oldprog)
-		oldprog->offloaded = false;
+	if (prog && tc_skip_hw(prog->gen_flags))
+		prog = NULL;
+	if (oldprog && tc_skip_hw(oldprog->gen_flags))
+		oldprog = NULL;
+	if (!prog && !oldprog)
+		return 0;
 
-	return 0;
+	return cls_bpf_offload_cmd(tp, prog, oldprog);
 }
 
 static void cls_bpf_stop_offload(struct tcf_proto *tp,
@@ -222,25 +204,26 @@ static void cls_bpf_stop_offload(struct tcf_proto *tp,
 {
 	int err;
 
-	if (!prog->offloaded)
-		return;
-
-	err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
-	if (err) {
+	err = cls_bpf_offload_cmd(tp, NULL, prog);
+	if (err)
 		pr_err("Stopping hardware offload failed: %d\n", err);
-		return;
-	}
-
-	prog->offloaded = false;
 }
 
 static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
 					 struct cls_bpf_prog *prog)
 {
-	if (!prog->offloaded)
-		return;
+	struct tcf_block *block = tp->chain->block;
+	struct tc_cls_bpf_offload cls_bpf = {};
+
+	tc_cls_common_offload_init(&cls_bpf.common, tp);
+	cls_bpf.command = TC_CLSBPF_STATS;
+	cls_bpf.exts = &prog->exts;
+	cls_bpf.prog = prog->filter;
+	cls_bpf.name = prog->bpf_name;
+	cls_bpf.exts_integrated = prog->exts_integrated;
+	cls_bpf.gen_flags = prog->gen_flags;
 
-	cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
+	tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, false);
 }
 
 static int cls_bpf_init(struct tcf_proto *tp)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index cd1b200acae7415d5e26c8aa3dbfed602f5796b2..661c7144b53af048b3a65484777910e2d60f25aa 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1040,6 +1040,8 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
 
 	if (!tp_head) {
 		RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
+		/* Wait for any in-flight RCU callback before it is freed. */
+		rcu_barrier_bh();
 		return;
 	}
 
@@ -1055,7 +1057,7 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
 	rcu_assign_pointer(*miniqp->p_miniq, miniq);
 
 	if (miniq_old)
-		/* This is counterpart of the rcu barrier above. We need to
+		/* This is the counterpart of the rcu barriers above. We need to
 		 * block potential new user of miniq_old until all readers
 		 * are not seeing it.
 		 */
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index 3f619fdcbf0a0b4a6f35ece8021c011f874a2d79..291c97b07058218635fcfcd06214aa79d74ec80d 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -78,6 +78,9 @@ const char *sctp_cname(const union sctp_subtype cid)
 	case SCTP_CID_AUTH:
 		return "AUTH";
 
+	case SCTP_CID_RECONF:
+		return "RECONF";
+
 	default:
 		break;
 	}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 3253f724a995256084dcb1f6610de3384b475e79..b4fb6e4886d264f302fc67b8d822cd8bdb5f3f41 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4498,7 +4498,7 @@ static int sctp_init_sock(struct sock *sk)
 	SCTP_DBG_OBJCNT_INC(sock);
 
 	local_bh_disable();
-	percpu_counter_inc(&sctp_sockets_allocated);
+	sk_sockets_allocated_inc(sk);
 	sock_prot_inuse_add(net, sk->sk_prot, 1);
 
 	/* Nothing can fail after this block, otherwise
@@ -4542,7 +4542,7 @@ static void sctp_destroy_sock(struct sock *sk)
 	}
 	sctp_endpoint_free(sp->ep);
 	local_bh_disable();
-	percpu_counter_dec(&sctp_sockets_allocated);
+	sk_sockets_allocated_dec(sk);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 	local_bh_enable();
 }
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index a71be33f3afeb0aaaef174ee082c4c547aab1e2d..e36ec5dd64c6ff969fc30aae893d1d5ca8c221bf 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -1084,29 +1084,21 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 		      gfp_t gfp)
 {
-	struct sctp_association *asoc;
-	__u16 needed, freed;
-
-	asoc = ulpq->asoc;
+	struct sctp_association *asoc = ulpq->asoc;
+	__u32 freed = 0;
+	__u16 needed;
 
-	if (chunk) {
-		needed = ntohs(chunk->chunk_hdr->length);
-		needed -= sizeof(struct sctp_data_chunk);
-	} else
-		needed = SCTP_DEFAULT_MAXWINDOW;
-
-	freed = 0;
+	needed = ntohs(chunk->chunk_hdr->length) -
+		 sizeof(struct sctp_data_chunk);
 
 	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
 		freed = sctp_ulpq_renege_order(ulpq, needed);
-		if (freed < needed) {
+		if (freed < needed)
 			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
-		}
 	}
 	/* If able to free enough room, accept this chunk. */
-	if (chunk && (freed >= needed)) {
-		int retval;
-		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+	if (freed >= needed) {
+		int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
 		/*
 		 * Enter partial delivery if chunk has not been
 		 * delivered; otherwise, drain the reassembly queue.
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index c5fda15ba3193f811151043ac3675a2ebfb15c38..1fdab5c4eda8c2a218448abb9bc461cf6474615c 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -401,7 +401,7 @@ void strp_data_ready(struct strparser *strp)
 	 * allows a thread in BH context to safely check if the process
 	 * lock is held. In this case, if the lock is held, queue work.
 	 */
-	if (sock_owned_by_user(strp->sk)) {
+	if (sock_owned_by_user_nocheck(strp->sk)) {
 		queue_work(strp_wq, &strp->work);
 		return;
 	}
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 47ec121574ce4ef95850f688d85b50eff766a710..c8001471da6c3c53be6c63dde1311302b093f415 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -324,6 +324,7 @@ restart:
 	if (res) {
 		pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
 			name, -res);
+		kfree(b);
 		return -EINVAL;
 	}
 
@@ -347,8 +348,10 @@ restart:
 	if (skb)
 		tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);
 
-	if (tipc_mon_create(net, bearer_id))
+	if (tipc_mon_create(net, bearer_id)) {
+		bearer_disable(net, b);
 		return -ENOMEM;
+	}
 
 	pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
 		name,
diff --git a/net/tipc/group.c b/net/tipc/group.c
index 95fec2c057d6ebdb223e19ef83bf9c383cb2156e..8e12ab55346b0cf45b9244c470b36c5f81db30d2 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -351,8 +351,7 @@ void tipc_group_update_member(struct tipc_member *m, int len)
 	if (m->window >= ADV_IDLE)
 		return;
 
-	if (!list_empty(&m->congested))
-		return;
+	list_del_init(&m->congested);
 
 	/* Sort member into congested members' list */
 	list_for_each_entry_safe(_m, tmp, &grp->congested, congested) {
@@ -369,18 +368,20 @@ void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
 	u16 prev = grp->bc_snd_nxt - 1;
 	struct tipc_member *m;
 	struct rb_node *n;
+	u16 ackers = 0;
 
 	for (n = rb_first(&grp->members); n; n = rb_next(n)) {
 		m = container_of(n, struct tipc_member, tree_node);
 		if (tipc_group_is_enabled(m)) {
 			tipc_group_update_member(m, len);
 			m->bc_acked = prev;
+			ackers++;
 		}
 	}
 
 	/* Mark number of acknowledges to expect, if any */
 	if (ack)
-		grp->bc_ackers = grp->member_cnt;
+		grp->bc_ackers = ackers;
 	grp->bc_snd_nxt++;
 }
 
@@ -648,6 +649,7 @@ static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
 	} else if (mtyp == GRP_REMIT_MSG) {
 		msg_set_grp_remitted(hdr, m->window);
 	}
+	msg_set_dest_droppable(hdr, true);
 	__skb_queue_tail(xmitq, skb);
 }
 
@@ -689,15 +691,16 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
 			msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
 			__skb_queue_tail(inputq, m->event_msg);
 		}
-		if (m->window < ADV_IDLE)
-			tipc_group_update_member(m, 0);
-		else
-			list_del_init(&m->congested);
+		list_del_init(&m->congested);
+		tipc_group_update_member(m, 0);
 		return;
 	case GRP_LEAVE_MSG:
 		if (!m)
 			return;
 		m->bc_syncpt = msg_grp_bc_syncpt(hdr);
+		list_del_init(&m->list);
+		list_del_init(&m->congested);
+		*usr_wakeup = true;
 
 		/* Wait until WITHDRAW event is received */
 		if (m->state != MBR_LEAVING) {
@@ -709,8 +712,6 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
 		ehdr = buf_msg(m->event_msg);
 		msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
 		__skb_queue_tail(inputq, m->event_msg);
-		*usr_wakeup = true;
-		list_del_init(&m->congested);
 		return;
 	case GRP_ADV_MSG:
 		if (!m)
@@ -849,19 +850,29 @@ void tipc_group_member_evt(struct tipc_group *grp,
 		*usr_wakeup = true;
 		m->usr_pending = false;
 		node_up = tipc_node_is_up(net, node);
-
-		/* Hold back event if more messages might be expected */
-		if (m->state != MBR_LEAVING && node_up) {
-			m->event_msg = skb;
-			tipc_group_decr_active(grp, m);
-			m->state = MBR_LEAVING;
-		} else {
-			if (node_up)
+		m->event_msg = NULL;
+
+		if (node_up) {
+			/* Hold back event if a LEAVE msg should be expected */
+			if (m->state != MBR_LEAVING) {
+				m->event_msg = skb;
+				tipc_group_decr_active(grp, m);
+				m->state = MBR_LEAVING;
+			} else {
 				msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
-			else
+				__skb_queue_tail(inputq, skb);
+			}
+		} else {
+			if (m->state != MBR_LEAVING) {
+				tipc_group_decr_active(grp, m);
+				m->state = MBR_LEAVING;
 				msg_set_grp_bc_seqno(hdr, m->bc_rcv_nxt);
+			} else {
+				msg_set_grp_bc_seqno(hdr, m->bc_syncpt);
+			}
 			__skb_queue_tail(inputq, skb);
 		}
+		list_del_init(&m->list);
 		list_del_init(&m->congested);
 	}
 	*sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index 8e884ed06d4b13d9751c27a51959b216b7f48b18..32dc33a94bc714f762a066389a4907e558244cd7 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -642,9 +642,13 @@ void tipc_mon_delete(struct net *net, int bearer_id)
 {
 	struct tipc_net *tn = tipc_net(net);
 	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
-	struct tipc_peer *self = get_self(net, bearer_id);
+	struct tipc_peer *self;
 	struct tipc_peer *peer, *tmp;
 
+	if (!mon)
+		return;
+
+	self = get_self(net, bearer_id);
 	write_lock_bh(&mon->lock);
 	tn->monitors[bearer_id] = NULL;
 	list_for_each_entry_safe(peer, tmp, &self->list, list) {
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 41127d0b925ea4d515e7c7bbe6739dee99a442f2..3b408448037769d3dba2da38fc00cdaf360e4950 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -727,11 +727,11 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
 
 	switch (sk->sk_state) {
 	case TIPC_ESTABLISHED:
+	case TIPC_CONNECTING:
 		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
 			revents |= POLLOUT;
 		/* fall thru' */
 	case TIPC_LISTEN:
-	case TIPC_CONNECTING:
 		if (!skb_queue_empty(&sk->sk_receive_queue))
 			revents |= POLLIN | POLLRDNORM;
 		break;
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index d7d6cb00c47bbab3c963f4b83839aadcc94606a1..1d84f91bbfb0c8c9087e309821eb687325733358 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -23,27 +23,14 @@ ifneq ($(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR),)
 cfg80211-y += extra-certs.o
 endif
 
-$(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.x509)
+$(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex)
 	@$(kecho) "  GEN     $@"
-	@(set -e; \
-	  allf=""; \
-	  for f in $^ ; do \
-	      # similar to hexdump -v -e '1/1 "0x%.2x," "\n"' \
-	      thisf=$$(od -An -v -tx1 < $$f | \
-	                   sed -e 's/ /\n/g' | \
-	                   sed -e 's/^[0-9a-f]\+$$/\0/;t;d' | \
-	                   sed -e 's/^/0x/;s/$$/,/'); \
-	      # file should not be empty - maybe command substitution failed? \
-	      test ! -z "$$thisf";\
-	      allf=$$allf$$thisf;\
-	  done; \
-	  ( \
-	      echo '#include "reg.h"'; \
-	      echo 'const u8 shipped_regdb_certs[] = {'; \
-	      echo "$$allf"; \
-	      echo '};'; \
-	      echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
-	  ) >> $@)
+	@(echo '#include "reg.h"'; \
+	  echo 'const u8 shipped_regdb_certs[] = {'; \
+	  cat $^ ; \
+	  echo '};'; \
+	  echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
+	 ) > $@
 
 $(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \
 		      $(wildcard $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%)/*.x509)
@@ -66,4 +53,6 @@ $(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \
 	      echo "$$allf"; \
 	      echo '};'; \
 	      echo 'unsigned int extra_regdb_certs_len = sizeof(extra_regdb_certs);'; \
-	  ) >> $@)
+	  ) > $@)
+
+clean-files += shipped-certs.c extra-certs.c
diff --git a/net/wireless/certs/sforshee.hex b/net/wireless/certs/sforshee.hex
new file mode 100644
index 0000000000000000000000000000000000000000..14ea66643ffaa2f8463bd8a699b5c2074bc7d83a
--- /dev/null
+++ b/net/wireless/certs/sforshee.hex
@@ -0,0 +1,86 @@
+/* Seth Forshee's regdb certificate */
+0x30, 0x82, 0x02, 0xa4, 0x30, 0x82, 0x01, 0x8c,
+0x02, 0x09, 0x00, 0xb2, 0x8d, 0xdf, 0x47, 0xae,
+0xf9, 0xce, 0xa7, 0x30, 0x0d, 0x06, 0x09, 0x2a,
+0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b,
+0x05, 0x00, 0x30, 0x13, 0x31, 0x11, 0x30, 0x0f,
+0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, 0x73,
+0x66, 0x6f, 0x72, 0x73, 0x68, 0x65, 0x65, 0x30,
+0x20, 0x17, 0x0d, 0x31, 0x37, 0x31, 0x30, 0x30,
+0x36, 0x31, 0x39, 0x34, 0x30, 0x33, 0x35, 0x5a,
+0x18, 0x0f, 0x32, 0x31, 0x31, 0x37, 0x30, 0x39,
+0x31, 0x32, 0x31, 0x39, 0x34, 0x30, 0x33, 0x35,
+0x5a, 0x30, 0x13, 0x31, 0x11, 0x30, 0x0f, 0x06,
+0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, 0x73, 0x66,
+0x6f, 0x72, 0x73, 0x68, 0x65, 0x65, 0x30, 0x82,
+0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86,
+0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05,
+0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82,
+0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xb5,
+0x40, 0xe3, 0x9c, 0x28, 0x84, 0x39, 0x03, 0xf2,
+0x39, 0xd7, 0x66, 0x2c, 0x41, 0x38, 0x15, 0xac,
+0x7e, 0xa5, 0x83, 0x71, 0x25, 0x7e, 0x90, 0x7c,
+0x68, 0xdd, 0x6f, 0x3f, 0xd9, 0xd7, 0x59, 0x38,
+0x9f, 0x7c, 0x6a, 0x52, 0xc2, 0x03, 0x2a, 0x2d,
+0x7e, 0x66, 0xf4, 0x1e, 0xb3, 0x12, 0x70, 0x20,
+0x5b, 0xd4, 0x97, 0x32, 0x3d, 0x71, 0x8b, 0x3b,
+0x1b, 0x08, 0x17, 0x14, 0x6b, 0x61, 0xc4, 0x57,
+0x8b, 0x96, 0x16, 0x1c, 0xfd, 0x24, 0xd5, 0x0b,
+0x09, 0xf9, 0x68, 0x11, 0x84, 0xfb, 0xca, 0x51,
+0x0c, 0xd1, 0x45, 0x19, 0xda, 0x10, 0x44, 0x8a,
+0xd9, 0xfe, 0x76, 0xa9, 0xfd, 0x60, 0x2d, 0x18,
+0x0b, 0x28, 0x95, 0xb2, 0x2d, 0xea, 0x88, 0x98,
+0xb8, 0xd1, 0x56, 0x21, 0xf0, 0x53, 0x1f, 0xf1,
+0x02, 0x6f, 0xe9, 0x46, 0x9b, 0x93, 0x5f, 0x28,
+0x90, 0x0f, 0xac, 0x36, 0xfa, 0x68, 0x23, 0x71,
+0x57, 0x56, 0xf6, 0xcc, 0xd3, 0xdf, 0x7d, 0x2a,
+0xd9, 0x1b, 0x73, 0x45, 0xeb, 0xba, 0x27, 0x85,
+0xef, 0x7a, 0x7f, 0xa5, 0xcb, 0x80, 0xc7, 0x30,
+0x36, 0xd2, 0x53, 0xee, 0xec, 0xac, 0x1e, 0xe7,
+0x31, 0xf1, 0x36, 0xa2, 0x9c, 0x63, 0xc6, 0x65,
+0x5b, 0x7f, 0x25, 0x75, 0x68, 0xa1, 0xea, 0xd3,
+0x7e, 0x00, 0x5c, 0x9a, 0x5e, 0xd8, 0x20, 0x18,
+0x32, 0x77, 0x07, 0x29, 0x12, 0x66, 0x1e, 0x36,
+0x73, 0xe7, 0x97, 0x04, 0x41, 0x37, 0xb1, 0xb1,
+0x72, 0x2b, 0xf4, 0xa1, 0x29, 0x20, 0x7c, 0x96,
+0x79, 0x0b, 0x2b, 0xd0, 0xd8, 0xde, 0xc8, 0x6c,
+0x3f, 0x93, 0xfb, 0xc5, 0xee, 0x78, 0x52, 0x11,
+0x15, 0x1b, 0x7a, 0xf6, 0xe2, 0x68, 0x99, 0xe7,
+0xfb, 0x46, 0x16, 0x84, 0xe3, 0xc7, 0xa1, 0xe6,
+0xe0, 0xd2, 0x46, 0xd5, 0xe1, 0xc4, 0x5f, 0xa0,
+0x66, 0xf4, 0xda, 0xc4, 0xff, 0x95, 0x1d, 0x02,
+0x03, 0x01, 0x00, 0x01, 0x30, 0x0d, 0x06, 0x09,
+0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01,
+0x0b, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00,
+0x87, 0x03, 0xda, 0xf2, 0x82, 0xc2, 0xdd, 0xaf,
+0x7c, 0x44, 0x2f, 0x86, 0xd3, 0x5f, 0x4c, 0x93,
+0x48, 0xb9, 0xfe, 0x07, 0x17, 0xbb, 0x21, 0xf7,
+0x25, 0x23, 0x4e, 0xaa, 0x22, 0x0c, 0x16, 0xb9,
+0x73, 0xae, 0x9d, 0x46, 0x7c, 0x75, 0xd9, 0xc3,
+0x49, 0x57, 0x47, 0xbf, 0x33, 0xb7, 0x97, 0xec,
+0xf5, 0x40, 0x75, 0xc0, 0x46, 0x22, 0xf0, 0xa0,
+0x5d, 0x9c, 0x79, 0x13, 0xa1, 0xff, 0xb8, 0xa3,
+0x2f, 0x7b, 0x8e, 0x06, 0x3f, 0xc8, 0xb6, 0xe4,
+0x6a, 0x28, 0xf2, 0x34, 0x5c, 0x23, 0x3f, 0x32,
+0xc0, 0xe6, 0xad, 0x0f, 0xac, 0xcf, 0x55, 0x74,
+0x47, 0x73, 0xd3, 0x01, 0x85, 0xb7, 0x0b, 0x22,
+0x56, 0x24, 0x7d, 0x9f, 0x09, 0xa9, 0x0e, 0x86,
+0x9e, 0x37, 0x5b, 0x9c, 0x6d, 0x02, 0xd9, 0x8c,
+0xc8, 0x50, 0x6a, 0xe2, 0x59, 0xf3, 0x16, 0x06,
+0xea, 0xb2, 0x42, 0xb5, 0x58, 0xfe, 0xba, 0xd1,
+0x81, 0x57, 0x1a, 0xef, 0xb2, 0x38, 0x88, 0x58,
+0xf6, 0xaa, 0xc4, 0x2e, 0x8b, 0x5a, 0x27, 0xe4,
+0xa5, 0xe8, 0xa4, 0xca, 0x67, 0x5c, 0xac, 0x72,
+0x67, 0xc3, 0x6f, 0x13, 0xc3, 0x2d, 0x35, 0x79,
+0xd7, 0x8a, 0xe7, 0xf5, 0xd4, 0x21, 0x30, 0x4a,
+0xd5, 0xf6, 0xa3, 0xd9, 0x79, 0x56, 0xf2, 0x0f,
+0x10, 0xf7, 0x7d, 0xd0, 0x51, 0x93, 0x2f, 0x47,
+0xf8, 0x7d, 0x4b, 0x0a, 0x84, 0x55, 0x12, 0x0a,
+0x7d, 0x4e, 0x3b, 0x1f, 0x2b, 0x2f, 0xfc, 0x28,
+0xb3, 0x69, 0x34, 0xe1, 0x80, 0x80, 0xbb, 0xe2,
+0xaf, 0xb9, 0xd6, 0x30, 0xf1, 0x1d, 0x54, 0x87,
+0x23, 0x99, 0x9f, 0x51, 0x03, 0x4c, 0x45, 0x7d,
+0x02, 0x65, 0x73, 0xab, 0xfd, 0xcf, 0x94, 0xcc,
+0x0d, 0x3a, 0x60, 0xfd, 0x3c, 0x14, 0x2f, 0x16,
+0x33, 0xa9, 0x21, 0x1f, 0xcb, 0x50, 0xb1, 0x8f,
+0x03, 0xee, 0xa0, 0x66, 0xa9, 0x16, 0x79, 0x14,
diff --git a/net/wireless/certs/sforshee.x509 b/net/wireless/certs/sforshee.x509
deleted file mode 100644
index c6f8f9d6b98839048822ebbe27ecb831614ccf3d..0000000000000000000000000000000000000000
Binary files a/net/wireless/certs/sforshee.x509 and /dev/null differ
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index b1ac23ca20c86be0af71e9a1ba92cc99d8d5a967..213d0c498c97d78b17c81d1fd8b850c8768f7057 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2610,7 +2610,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
 	case NL80211_IFTYPE_AP:
 		if (wdev->ssid_len &&
 		    nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
-			goto nla_put_failure;
+			goto nla_put_failure_locked;
 		break;
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_P2P_CLIENT:
@@ -2623,7 +2623,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
 		if (!ssid_ie)
 			break;
 		if (nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
-			goto nla_put_failure;
+			goto nla_put_failure_locked;
 		break;
 		}
 	default:
@@ -2635,6 +2635,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
 	genlmsg_end(msg, hdr);
 	return 0;
 
+ nla_put_failure_locked:
+	wdev_unlock(wdev);
  nla_put_failure:
 	genlmsg_cancel(msg, hdr);
 	return -EMSGSIZE;
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 347ab31574d509ac9edd448bcbb7501a05f5ac2d..3f6f6f8c9fa5224e75c1b62f299f217da172d370 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -8,15 +8,29 @@
  *
  */
 
+#include <linux/bottom_half.h>
+#include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
+#include <linux/percpu.h>
 #include <net/dst.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/ip_tunnels.h>
 #include <net/ip6_tunnel.h>
 
+struct xfrm_trans_tasklet {
+	struct tasklet_struct tasklet;
+	struct sk_buff_head queue;
+};
+
+struct xfrm_trans_cb {
+	int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
+};
+
+#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))
+
 static struct kmem_cache *secpath_cachep __read_mostly;
 
 static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
@@ -25,6 +39,8 @@ static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1];
 static struct gro_cells gro_cells;
 static struct net_device xfrm_napi_dev;
 
+static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);
+
 int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
 {
 	int err = 0;
@@ -207,7 +223,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 	xfrm_address_t *daddr;
 	struct xfrm_mode *inner_mode;
 	u32 mark = skb->mark;
-	unsigned int family;
+	unsigned int family = AF_UNSPEC;
 	int decaps = 0;
 	int async = 0;
 	bool xfrm_gro = false;
@@ -216,6 +232,16 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 
 	if (encap_type < 0) {
 		x = xfrm_input_state(skb);
+
+		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
+			if (x->km.state == XFRM_STATE_ACQ)
+				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
+			else
+				XFRM_INC_STATS(net,
+					       LINUX_MIB_XFRMINSTATEINVALID);
+			goto drop;
+		}
+
 		family = x->outer_mode->afinfo->family;
 
 		/* An encap_type of -1 indicates async resumption. */
@@ -467,9 +493,41 @@ int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
 }
 EXPORT_SYMBOL(xfrm_input_resume);
 
+static void xfrm_trans_reinject(unsigned long data)
+{
+	struct xfrm_trans_tasklet *trans = (void *)data;
+	struct sk_buff_head queue;
+	struct sk_buff *skb;
+
+	__skb_queue_head_init(&queue);
+	skb_queue_splice_init(&trans->queue, &queue);
+
+	while ((skb = __skb_dequeue(&queue)))
+		XFRM_TRANS_SKB_CB(skb)->finish(dev_net(skb->dev), NULL, skb);
+}
+
+int xfrm_trans_queue(struct sk_buff *skb,
+		     int (*finish)(struct net *, struct sock *,
+				   struct sk_buff *))
+{
+	struct xfrm_trans_tasklet *trans;
+
+	trans = this_cpu_ptr(&xfrm_trans_tasklet);
+
+	if (skb_queue_len(&trans->queue) >= netdev_max_backlog)
+		return -ENOBUFS;
+
+	XFRM_TRANS_SKB_CB(skb)->finish = finish;
+	skb_queue_tail(&trans->queue, skb);
+	tasklet_schedule(&trans->tasklet);
+	return 0;
+}
+EXPORT_SYMBOL(xfrm_trans_queue);
+
 void __init xfrm_input_init(void)
 {
 	int err;
+	int i;
 
 	init_dummy_netdev(&xfrm_napi_dev);
 	err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
@@ -480,4 +538,13 @@ void __init xfrm_input_init(void)
 					   sizeof(struct sec_path),
 					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
 					   NULL);
+
+	for_each_possible_cpu(i) {
+		struct xfrm_trans_tasklet *trans;
+
+		trans = &per_cpu(xfrm_trans_tasklet, i);
+		__skb_queue_head_init(&trans->queue);
+		tasklet_init(&trans->tasklet, xfrm_trans_reinject,
+			     (unsigned long)trans);
+	}
 }
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 9542975eb2f90dcb2bae894edeb9b418d04f252e..70aa5cb0c659d54eacb85f92dc95b2db17bfe1d0 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1168,9 +1168,15 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
  again:
 	pol = rcu_dereference(sk->sk_policy[dir]);
 	if (pol != NULL) {
-		bool match = xfrm_selector_match(&pol->selector, fl, family);
+		bool match;
 		int err = 0;
 
+		if (pol->family != family) {
+			pol = NULL;
+			goto out;
+		}
+
+		match = xfrm_selector_match(&pol->selector, fl, family);
 		if (match) {
 			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
 				pol = NULL;
@@ -1833,6 +1839,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 		   sizeof(struct xfrm_policy *) * num_pols) == 0 &&
 	    xfrm_xdst_can_reuse(xdst, xfrm, err)) {
 		dst_hold(&xdst->u.dst);
+		xfrm_pols_put(pols, num_pols);
 		while (err > 0)
 			xfrm_state_put(xfrm[--err]);
 		return xdst;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 065d89606888ec1bf053577d3949746bcea6f099..500b3391f474b96fe273060ff8eae16f1e23f3c2 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1343,6 +1343,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
 
 	if (orig->aead) {
 		x->aead = xfrm_algo_aead_clone(orig->aead);
+		x->geniv = orig->geniv;
 		if (!x->aead)
 			goto error;
 	}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 983b0233767bec16ba55b763ef82ce781ca5046a..bdb48e5dba0480aa4c3c6855be42cfb93f3bf335 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1419,11 +1419,14 @@ static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
 
 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
 {
+	u16 prev_family;
 	int i;
 
 	if (nr > XFRM_MAX_DEPTH)
 		return -EINVAL;
 
+	prev_family = family;
+
 	for (i = 0; i < nr; i++) {
 		/* We never validated the ut->family value, so many
 		 * applications simply leave it at zero.  The check was
@@ -1435,6 +1438,12 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
 		if (!ut[i].family)
 			ut[i].family = family;
 
+		if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
+		    (ut[i].family != prev_family))
+			return -EINVAL;
+
+		prev_family = ut[i].family;
+
 		switch (ut[i].family) {
 		case AF_INET:
 			break;
@@ -1445,6 +1454,21 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
 		default:
 			return -EINVAL;
 		}
+
+		switch (ut[i].id.proto) {
+		case IPPROTO_AH:
+		case IPPROTO_ESP:
+		case IPPROTO_COMP:
+#if IS_ENABLED(CONFIG_IPV6)
+		case IPPROTO_ROUTING:
+		case IPPROTO_DSTOPTS:
+#endif
+		case IPSEC_PROTO_ANY:
+			break;
+		default:
+			return -EINVAL;
+		}
+
 	}
 
 	return 0;
@@ -2470,7 +2494,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
 	[XFRMA_PROTO]		= { .type = NLA_U8 },
 	[XFRMA_ADDRESS_FILTER]	= { .len = sizeof(struct xfrm_address_filter) },
 	[XFRMA_OFFLOAD_DEV]	= { .len = sizeof(struct xfrm_user_offload) },
-	[XFRMA_OUTPUT_MARK]	= { .len = NLA_U32 },
+	[XFRMA_OUTPUT_MARK]	= { .type = NLA_U32 },
 };
 
 static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
diff --git a/security/Kconfig b/security/Kconfig
index e8e449444e658be4a9190c6ea2de14cca8fc4890..a623d13bf2884afdc6fdcdbe656811b7d31b2951 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -54,6 +54,16 @@ config SECURITY_NETWORK
 	  implement socket and networking access controls.
 	  If you are unsure how to answer this question, answer N.
 
+config PAGE_TABLE_ISOLATION
+	bool "Remove the kernel mapping in user mode"
+	depends on X86_64 && !UML
+	help
+	  This feature reduces the number of hardware side channels by
+	  ensuring that the majority of kernel addresses are not mapped
+	  into userspace.
+
+	  See Documentation/x86/pagetable-isolation.txt for more details.
+
 config SECURITY_INFINIBAND
 	bool "Infiniband Security Hooks"
 	depends on SECURITY && INFINIBAND
diff --git a/sound/hda/ext/hdac_ext_bus.c b/sound/hda/ext/hdac_ext_bus.c
index 31b510c5ca0bdf3cfef06bdbc47700e4f8c3ca73..0daf31383084d22a85553970b9480a4287515d5b 100644
--- a/sound/hda/ext/hdac_ext_bus.c
+++ b/sound/hda/ext/hdac_ext_bus.c
@@ -146,7 +146,7 @@ int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *ebus, int addr)
 	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
 	if (!edev)
 		return -ENOMEM;
-	hdev = &edev->hdac;
+	hdev = &edev->hdev;
 	edev->ebus = ebus;
 
 	snprintf(name, sizeof(name), "ehdaudio%dD%d", ebus->idx, addr);
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index d22758165496c966e7797e5ac2a2b446cd21d972..84c3582f3982785c1408fae5435f4c779dea2359 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -71,6 +71,7 @@ source "sound/soc/stm/Kconfig"
 source "sound/soc/sunxi/Kconfig"
 source "sound/soc/tegra/Kconfig"
 source "sound/soc/txx9/Kconfig"
+source "sound/soc/uniphier/Kconfig"
 source "sound/soc/ux500/Kconfig"
 source "sound/soc/xtensa/Kconfig"
 source "sound/soc/zte/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 5327f4d6c668b6e6180849cd60e5d7de969089e1..74cd1858d38b4c28b53d21b4ba8e1aed9728f9a4 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_SND_SOC)	+= stm/
 obj-$(CONFIG_SND_SOC)	+= sunxi/
 obj-$(CONFIG_SND_SOC)	+= tegra/
 obj-$(CONFIG_SND_SOC)	+= txx9/
+obj-$(CONFIG_SND_SOC)	+= uniphier/
 obj-$(CONFIG_SND_SOC)	+= ux500/
 obj-$(CONFIG_SND_SOC)	+= xtensa/
 obj-$(CONFIG_SND_SOC)	+= zte/
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index b5e41df6bb3a895f10a90756fab0e54954a685d7..c33a512283a48fecfe52a7ca3086dc7b30376e29 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -850,6 +850,9 @@ static snd_pcm_uframes_t acp_dma_pointer(struct snd_pcm_substream *substream)
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct audio_substream_data *rtd = runtime->private_data;
 
+	if (!rtd)
+		return -EINVAL;
+
 	buffersize = frames_to_bytes(runtime, runtime->buffer_size);
 	bytescount = acp_get_byte_count(rtd->acp_mmio, substream->stream);
 
@@ -875,6 +878,8 @@ static int acp_dma_prepare(struct snd_pcm_substream *substream)
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct audio_substream_data *rtd = runtime->private_data;
 
+	if (!rtd)
+		return -EINVAL;
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 		config_acp_dma_channel(rtd->acp_mmio, SYSRAM_TO_ACP_CH_NUM,
 					PLAYBACK_START_DMA_DESCR_CH12,
@@ -1091,7 +1096,11 @@ static int acp_audio_probe(struct platform_device *pdev)
 	dev_set_drvdata(&pdev->dev, audio_drv_data);
 
 	/* Initialize the ACP */
-	acp_init(audio_drv_data->acp_mmio, audio_drv_data->asic_type);
+	status = acp_init(audio_drv_data->acp_mmio, audio_drv_data->asic_type);
+	if (status) {
+		dev_err(&pdev->dev, "ACP Init failed status:%d\n", status);
+		return status;
+	}
 
 	status = snd_soc_register_platform(&pdev->dev, &acp_asoc_platform);
 	if (status != 0) {
@@ -1108,9 +1117,12 @@ static int acp_audio_probe(struct platform_device *pdev)
 
 static int acp_audio_remove(struct platform_device *pdev)
 {
+	int status;
 	struct audio_drv_data *adata = dev_get_drvdata(&pdev->dev);
 
-	acp_deinit(adata->acp_mmio);
+	status = acp_deinit(adata->acp_mmio);
+	if (status)
+		dev_err(&pdev->dev, "ACP Deinit failed status:%d\n", status);
 	snd_soc_unregister_platform(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 
@@ -1120,9 +1132,14 @@ static int acp_audio_remove(struct platform_device *pdev)
 static int acp_pcm_resume(struct device *dev)
 {
 	u16 bank;
+	int status;
 	struct audio_drv_data *adata = dev_get_drvdata(dev);
 
-	acp_init(adata->acp_mmio, adata->asic_type);
+	status = acp_init(adata->acp_mmio, adata->asic_type);
+	if (status) {
+		dev_err(dev, "ACP Init failed status:%d\n", status);
+		return status;
+	}
 
 	if (adata->play_stream && adata->play_stream->runtime) {
 		/* For Stoney, Memory gating is disabled,i.e SRAM Banks
@@ -1154,18 +1171,26 @@ static int acp_pcm_resume(struct device *dev)
 
 static int acp_pcm_runtime_suspend(struct device *dev)
 {
+	int status;
 	struct audio_drv_data *adata = dev_get_drvdata(dev);
 
-	acp_deinit(adata->acp_mmio);
+	status = acp_deinit(adata->acp_mmio);
+	if (status)
+		dev_err(dev, "ACP Deinit failed status:%d\n", status);
 	acp_reg_write(0, adata->acp_mmio, mmACP_EXTERNAL_INTR_ENB);
 	return 0;
 }
 
 static int acp_pcm_runtime_resume(struct device *dev)
 {
+	int status;
 	struct audio_drv_data *adata = dev_get_drvdata(dev);
 
-	acp_init(adata->acp_mmio, adata->asic_type);
+	status = acp_init(adata->acp_mmio, adata->asic_type);
+	if (status) {
+		dev_err(dev, "ACP Init failed status:%d\n", status);
+		return status;
+	}
 	acp_reg_write(1, adata->acp_mmio, mmACP_EXTERNAL_INTR_ENB);
 	return 0;
 }
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 8445edd067375d5349b0bba3bc5529c74f15d35c..ebabed69f0e67d3c74bffdfee4726acb8044c51f 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -308,15 +308,9 @@ static int atmel_classd_codec_resume(struct snd_soc_codec *codec)
 	return regcache_sync(dd->regmap);
 }
 
-static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
-{
-	return dev_get_regmap(dev, NULL);
-}
-
 static struct snd_soc_codec_driver soc_codec_dev_classd = {
 	.probe		= atmel_classd_codec_probe,
 	.resume		= atmel_classd_codec_resume,
-	.get_regmap	= atmel_classd_codec_get_remap,
 	.component_driver = {
 		.controls		= atmel_classd_snd_controls,
 		.num_controls		= ARRAY_SIZE(atmel_classd_snd_controls),
diff --git a/sound/soc/cirrus/ep93xx-ac97.c b/sound/soc/cirrus/ep93xx-ac97.c
index bbf7a9266a991a0479d990ab803b9595fa2ffe2c..cd5a939ad6081a2394f3acbc5a030a99d5790310 100644
--- a/sound/soc/cirrus/ep93xx-ac97.c
+++ b/sound/soc/cirrus/ep93xx-ac97.c
@@ -365,7 +365,7 @@ static int ep93xx_ac97_probe(struct platform_device *pdev)
 {
 	struct ep93xx_ac97_info *info;
 	struct resource *res;
-	unsigned int irq;
+	int irq;
 	int ret;
 
 	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -378,8 +378,8 @@ static int ep93xx_ac97_probe(struct platform_device *pdev)
 		return PTR_ERR(info->regs);
 
 	irq = platform_get_irq(pdev, 0);
-	if (!irq)
-		return -ENODEV;
+	if (irq <= 0)
+		return irq < 0 ? irq : -ENODEV;
 
 	ret = devm_request_irq(&pdev->dev, irq, ep93xx_ac97_interrupt,
 			       IRQF_TRIGGER_HIGH, pdev->name, info);
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 848c5fe49bc7a46592c80086dd6a77974d0fd795..be8ea723dff9bf75ee071ec12766dacb0368c957 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -1319,6 +1319,7 @@ static int pm860x_probe(struct snd_soc_codec *codec)
 	int i, ret;
 
 	pm860x->codec = codec;
+	snd_soc_codec_init_regmap(codec, pm860x->regmap);
 
 	for (i = 0; i < 4; i++) {
 		ret = request_threaded_irq(pm860x->irq[i], NULL,
@@ -1348,18 +1349,10 @@ static int pm860x_remove(struct snd_soc_codec *codec)
 	return 0;
 }
 
-static struct regmap *pm860x_get_regmap(struct device *dev)
-{
-	struct pm860x_priv *pm860x = dev_get_drvdata(dev);
-
-	return pm860x->regmap;
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_pm860x = {
 	.probe		= pm860x_probe,
 	.remove		= pm860x_remove,
 	.set_bias_level	= pm860x_set_bias_level,
-	.get_regmap	= pm860x_get_regmap,
 
 	.component_driver = {
 		.controls		= pm860x_snd_controls,
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index a42ddbc93f3df1e52bd3f8f188779ddfe198ed1b..b228cc13191a7a275b0c1d6742d1a452103d7751 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -95,6 +95,7 @@ config SND_SOC_ALL_CODECS
 	select SND_SOC_MAX98925 if I2C
 	select SND_SOC_MAX98926 if I2C
 	select SND_SOC_MAX98927 if I2C
+	select SND_SOC_MAX98373 if I2C
 	select SND_SOC_MAX9850 if I2C
 	select SND_SOC_MAX9860 if I2C
 	select SND_SOC_MAX9768 if I2C
@@ -109,6 +110,8 @@ config SND_SOC_ALL_CODECS
 	select SND_SOC_PCM1681 if I2C
 	select SND_SOC_PCM179X_I2C if I2C
 	select SND_SOC_PCM179X_SPI if SPI_MASTER
+	select SND_SOC_PCM186X_I2C if I2C
+	select SND_SOC_PCM186X_SPI if SPI_MASTER
 	select SND_SOC_PCM3008
 	select SND_SOC_PCM3168A_I2C if I2C
 	select SND_SOC_PCM3168A_SPI if SPI_MASTER
@@ -148,6 +151,7 @@ config SND_SOC_ALL_CODECS
 	select SND_SOC_TAS5086 if I2C
 	select SND_SOC_TAS571X if I2C
 	select SND_SOC_TAS5720 if I2C
+	select SND_SOC_TAS6424 if I2C
 	select SND_SOC_TFA9879 if I2C
 	select SND_SOC_TLV320AIC23_I2C if I2C
 	select SND_SOC_TLV320AIC23_SPI if SPI_MASTER
@@ -158,6 +162,7 @@ config SND_SOC_ALL_CODECS
 	select SND_SOC_TLV320AIC3X if I2C
 	select SND_SOC_TPA6130A2 if I2C
 	select SND_SOC_TLV320DAC33 if I2C
+	select SND_SOC_TSCS42XX if I2C
 	select SND_SOC_TS3A227E if I2C
 	select SND_SOC_TWL4030 if TWL4030_CORE
 	select SND_SOC_TWL6040 if TWL6040_CORE
@@ -623,6 +628,10 @@ config SND_SOC_MAX98927
 	tristate "Maxim Integrated MAX98927 Speaker Amplifier"
 	depends on I2C
 
+config SND_SOC_MAX98373
+	tristate "Maxim Integrated MAX98373 Speaker Amplifier"
+	depends on I2C
+
 config SND_SOC_MAX9850
 	tristate
 
@@ -661,6 +670,21 @@ config SND_SOC_PCM179X_SPI
 	  Enable support for Texas Instruments PCM179x CODEC.
 	  Select this if your PCM179x is connected via an SPI bus.
 
+config SND_SOC_PCM186X
+	tristate
+
+config SND_SOC_PCM186X_I2C
+	tristate "Texas Instruments PCM186x CODECs - I2C"
+	depends on I2C
+	select SND_SOC_PCM186X
+	select REGMAP_I2C
+
+config SND_SOC_PCM186X_SPI
+	tristate "Texas Instruments PCM186x CODECs - SPI"
+	depends on SPI_MASTER
+	select SND_SOC_PCM186X
+	select REGMAP_SPI
+
 config SND_SOC_PCM3008
        tristate
 
@@ -883,6 +907,13 @@ config SND_SOC_TAS5720
 	  Enable support for Texas Instruments TAS5720L/M high-efficiency mono
 	  Class-D audio power amplifiers.
 
+config SND_SOC_TAS6424
+	tristate "Texas Instruments TAS6424 Quad-Channel Audio amplifier"
+	depends on I2C
+	help
+	  Enable support for Texas Instruments TAS6424 high-efficiency
+	  digital input quad-channel Class-D audio power amplifiers.
+
 config SND_SOC_TFA9879
 	tristate "NXP Semiconductors TFA9879 amplifier"
 	depends on I2C
@@ -913,12 +944,12 @@ config SND_SOC_TLV320AIC32X4
 	tristate
 
 config SND_SOC_TLV320AIC32X4_I2C
-	tristate
+	tristate "Texas Instruments TLV320AIC32x4 audio CODECs - I2C"
 	depends on I2C
 	select SND_SOC_TLV320AIC32X4
 
 config SND_SOC_TLV320AIC32X4_SPI
-	tristate
+	tristate "Texas Instruments TLV320AIC32x4 audio CODECs - SPI"
 	depends on SPI_MASTER
 	select SND_SOC_TLV320AIC32X4
 
@@ -933,6 +964,13 @@ config SND_SOC_TS3A227E
 	tristate "TI Headset/Mic detect and keypress chip"
 	depends on I2C
 
+config SND_SOC_TSCS42XX
+	tristate "Tempo Semiconductor TSCS42xx CODEC"
+	depends on I2C
+	select REGMAP_I2C
+	help
+	  Add support for Tempo Semiconductor's TSCS42xx audio CODEC.
+
 config SND_SOC_TWL4030
 	select MFD_TWL4030_AUDIO
 	tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 0001069ce2a7e2f346cd7775f144864282d622e8..fcc5073f6d614a4ee5d01f59434662c3e4bd5e56 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -90,6 +90,7 @@ snd-soc-max9867-objs := max9867.o
 snd-soc-max98925-objs := max98925.o
 snd-soc-max98926-objs := max98926.o
 snd-soc-max98927-objs := max98927.o
+snd-soc-max98373-objs := max98373.o
 snd-soc-max9850-objs := max9850.o
 snd-soc-max9860-objs := max9860.o
 snd-soc-mc13783-objs := mc13783.o
@@ -105,6 +106,9 @@ snd-soc-pcm1681-objs := pcm1681.o
 snd-soc-pcm179x-codec-objs := pcm179x.o
 snd-soc-pcm179x-i2c-objs := pcm179x-i2c.o
 snd-soc-pcm179x-spi-objs := pcm179x-spi.o
+snd-soc-pcm186x-objs := pcm186x.o
+snd-soc-pcm186x-i2c-objs := pcm186x-i2c.o
+snd-soc-pcm186x-spi-objs := pcm186x-spi.o
 snd-soc-pcm3008-objs := pcm3008.o
 snd-soc-pcm3168a-objs := pcm3168a.o
 snd-soc-pcm3168a-i2c-objs := pcm3168a-i2c.o
@@ -156,6 +160,7 @@ snd-soc-sti-sas-objs := sti-sas.o
 snd-soc-tas5086-objs := tas5086.o
 snd-soc-tas571x-objs := tas571x.o
 snd-soc-tas5720-objs := tas5720.o
+snd-soc-tas6424-objs := tas6424.o
 snd-soc-tfa9879-objs := tfa9879.o
 snd-soc-tlv320aic23-objs := tlv320aic23.o
 snd-soc-tlv320aic23-i2c-objs := tlv320aic23-i2c.o
@@ -167,6 +172,7 @@ snd-soc-tlv320aic32x4-i2c-objs := tlv320aic32x4-i2c.o
 snd-soc-tlv320aic32x4-spi-objs := tlv320aic32x4-spi.o
 snd-soc-tlv320aic3x-objs := tlv320aic3x.o
 snd-soc-tlv320dac33-objs := tlv320dac33.o
+snd-soc-tscs42xx-objs := tscs42xx.o
 snd-soc-ts3a227e-objs := ts3a227e.o
 snd-soc-twl4030-objs := twl4030.o
 snd-soc-twl6040-objs := twl6040.o
@@ -330,6 +336,7 @@ obj-$(CONFIG_SND_SOC_MAX9867)	+= snd-soc-max9867.o
 obj-$(CONFIG_SND_SOC_MAX98925)	+= snd-soc-max98925.o
 obj-$(CONFIG_SND_SOC_MAX98926)	+= snd-soc-max98926.o
 obj-$(CONFIG_SND_SOC_MAX98927)	+= snd-soc-max98927.o
+obj-$(CONFIG_SND_SOC_MAX98373)	+= snd-soc-max98373.o
 obj-$(CONFIG_SND_SOC_MAX9850)	+= snd-soc-max9850.o
 obj-$(CONFIG_SND_SOC_MAX9860)	+= snd-soc-max9860.o
 obj-$(CONFIG_SND_SOC_MC13783)	+= snd-soc-mc13783.o
@@ -345,6 +352,9 @@ obj-$(CONFIG_SND_SOC_PCM1681)	+= snd-soc-pcm1681.o
 obj-$(CONFIG_SND_SOC_PCM179X)	+= snd-soc-pcm179x-codec.o
 obj-$(CONFIG_SND_SOC_PCM179X_I2C)	+= snd-soc-pcm179x-i2c.o
 obj-$(CONFIG_SND_SOC_PCM179X_SPI)	+= snd-soc-pcm179x-spi.o
+obj-$(CONFIG_SND_SOC_PCM186X)	+= snd-soc-pcm186x.o
+obj-$(CONFIG_SND_SOC_PCM186X_I2C)	+= snd-soc-pcm186x-i2c.o
+obj-$(CONFIG_SND_SOC_PCM186X_SPI)	+= snd-soc-pcm186x-spi.o
 obj-$(CONFIG_SND_SOC_PCM3008)	+= snd-soc-pcm3008.o
 obj-$(CONFIG_SND_SOC_PCM3168A)	+= snd-soc-pcm3168a.o
 obj-$(CONFIG_SND_SOC_PCM3168A_I2C)	+= snd-soc-pcm3168a-i2c.o
@@ -395,6 +405,7 @@ obj-$(CONFIG_SND_SOC_TAS2552)	+= snd-soc-tas2552.o
 obj-$(CONFIG_SND_SOC_TAS5086)	+= snd-soc-tas5086.o
 obj-$(CONFIG_SND_SOC_TAS571X)	+= snd-soc-tas571x.o
 obj-$(CONFIG_SND_SOC_TAS5720)	+= snd-soc-tas5720.o
+obj-$(CONFIG_SND_SOC_TAS6424)	+= snd-soc-tas6424.o
 obj-$(CONFIG_SND_SOC_TFA9879)	+= snd-soc-tfa9879.o
 obj-$(CONFIG_SND_SOC_TLV320AIC23)	+= snd-soc-tlv320aic23.o
 obj-$(CONFIG_SND_SOC_TLV320AIC23_I2C)	+= snd-soc-tlv320aic23-i2c.o
@@ -406,6 +417,7 @@ obj-$(CONFIG_SND_SOC_TLV320AIC32X4_I2C)	+= snd-soc-tlv320aic32x4-i2c.o
 obj-$(CONFIG_SND_SOC_TLV320AIC32X4_SPI)	+= snd-soc-tlv320aic32x4-spi.o
 obj-$(CONFIG_SND_SOC_TLV320AIC3X)	+= snd-soc-tlv320aic3x.o
 obj-$(CONFIG_SND_SOC_TLV320DAC33)	+= snd-soc-tlv320dac33.o
+obj-$(CONFIG_SND_SOC_TSCS42XX)	+= snd-soc-tscs42xx.o
 obj-$(CONFIG_SND_SOC_TS3A227E)	+= snd-soc-ts3a227e.o
 obj-$(CONFIG_SND_SOC_TWL4030)	+= snd-soc-twl4030.o
 obj-$(CONFIG_SND_SOC_TWL6040)	+= snd-soc-twl6040.o
diff --git a/sound/soc/codecs/cq93vc.c b/sound/soc/codecs/cq93vc.c
index 6ed2cc374768735f88092c0b4e92be720bfe0fce..3bf93652bb314ea2476b6511069b66f146ec90cc 100644
--- a/sound/soc/codecs/cq93vc.c
+++ b/sound/soc/codecs/cq93vc.c
@@ -121,17 +121,19 @@ static struct snd_soc_dai_driver cq93vc_dai = {
 	.ops = &cq93vc_dai_ops,
 };
 
-static struct regmap *cq93vc_get_regmap(struct device *dev)
+static int cq93vc_probe(struct snd_soc_component *component)
 {
-	struct davinci_vc *davinci_vc = dev->platform_data;
+	struct davinci_vc *davinci_vc = component->dev->platform_data;
 
-	return davinci_vc->regmap;
+	snd_soc_component_init_regmap(component, davinci_vc->regmap);
+
+	return 0;
 }
 
 static const struct snd_soc_codec_driver soc_codec_dev_cq93vc = {
 	.set_bias_level = cq93vc_set_bias_level,
-	.get_regmap = cq93vc_get_regmap,
 	.component_driver = {
+		.probe = cq93vc_probe,
 		.controls = cq93vc_snd_controls,
 		.num_controls = ARRAY_SIZE(cq93vc_snd_controls),
 	},
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index 7e9806206648c0eafc9a11ca468027ff3b67a0c0..bc3a72e4c4ed73d4d7d674603c14334d6461596a 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -355,13 +355,9 @@ static int cs35l32_i2c_probe(struct i2c_client *i2c_client,
 	unsigned int devid = 0;
 	unsigned int reg;
 
-
-	cs35l32 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs35l32_private),
-			       GFP_KERNEL);
-	if (!cs35l32) {
-		dev_err(&i2c_client->dev, "could not allocate codec\n");
+	cs35l32 = devm_kzalloc(&i2c_client->dev, sizeof(*cs35l32), GFP_KERNEL);
+	if (!cs35l32)
 		return -ENOMEM;
-	}
 
 	i2c_set_clientdata(i2c_client, cs35l32);
 
@@ -375,13 +371,11 @@ static int cs35l32_i2c_probe(struct i2c_client *i2c_client,
 	if (pdata) {
 		cs35l32->pdata = *pdata;
 	} else {
-		pdata = devm_kzalloc(&i2c_client->dev,
-				     sizeof(struct cs35l32_platform_data),
-				GFP_KERNEL);
-		if (!pdata) {
-			dev_err(&i2c_client->dev, "could not allocate pdata\n");
+		pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
+				     GFP_KERNEL);
+		if (!pdata)
 			return -ENOMEM;
-		}
+
 		if (i2c_client->dev.of_node) {
 			ret = cs35l32_handle_of_data(i2c_client,
 						     &cs35l32->pdata);
diff --git a/sound/soc/codecs/cs35l34.c b/sound/soc/codecs/cs35l34.c
index 1e05026bedcadeb422c3f6069db0a0d5e9a83e1f..0600d5264c4c7de155f92e1c27b6ac2c6acb6d58 100644
--- a/sound/soc/codecs/cs35l34.c
+++ b/sound/soc/codecs/cs35l34.c
@@ -1004,13 +1004,9 @@ static int cs35l34_i2c_probe(struct i2c_client *i2c_client,
 	unsigned int devid = 0;
 	unsigned int reg;
 
-	cs35l34 = devm_kzalloc(&i2c_client->dev,
-			       sizeof(struct cs35l34_private),
-			       GFP_KERNEL);
-	if (!cs35l34) {
-		dev_err(&i2c_client->dev, "could not allocate codec\n");
+	cs35l34 = devm_kzalloc(&i2c_client->dev, sizeof(*cs35l34), GFP_KERNEL);
+	if (!cs35l34)
 		return -ENOMEM;
-	}
 
 	i2c_set_clientdata(i2c_client, cs35l34);
 	cs35l34->regmap = devm_regmap_init_i2c(i2c_client, &cs35l34_regmap);
@@ -1044,14 +1040,11 @@ static int cs35l34_i2c_probe(struct i2c_client *i2c_client,
 	if (pdata) {
 		cs35l34->pdata = *pdata;
 	} else {
-		pdata = devm_kzalloc(&i2c_client->dev,
-				sizeof(struct cs35l34_platform_data),
-				GFP_KERNEL);
-		if (!pdata) {
-			dev_err(&i2c_client->dev,
-				"could not allocate pdata\n");
+		pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
+				     GFP_KERNEL);
+		if (!pdata)
 			return -ENOMEM;
-		}
+
 		if (i2c_client->dev.of_node) {
 			ret = cs35l34_handle_of_data(i2c_client, pdata);
 			if (ret != 0)
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 0d9c4a57301bb4a150c34d0f4e29384cd55915f5..9731e5dff291495e168f505e0a0126e4b8fbd4ea 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -1100,8 +1100,7 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
 	unsigned int reg;
 	u32 val32;
 
-	cs42l52 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs42l52_private),
-			       GFP_KERNEL);
+	cs42l52 = devm_kzalloc(&i2c_client->dev, sizeof(*cs42l52), GFP_KERNEL);
 	if (cs42l52 == NULL)
 		return -ENOMEM;
 	cs42l52->dev = &i2c_client->dev;
@@ -1115,13 +1114,11 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
 	if (pdata) {
 		cs42l52->pdata = *pdata;
 	} else {
-		pdata = devm_kzalloc(&i2c_client->dev,
-				     sizeof(struct cs42l52_platform_data),
-				GFP_KERNEL);
-		if (!pdata) {
-			dev_err(&i2c_client->dev, "could not allocate pdata\n");
+		pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
+				     GFP_KERNEL);
+		if (!pdata)
 			return -ENOMEM;
-		}
+
 		if (i2c_client->dev.of_node) {
 			if (of_property_read_bool(i2c_client->dev.of_node,
 				"cirrus,mica-differential-cfg"))
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index cb6ca85f15362cca9d11a1dfe9519f012dd82466..fd7b8d32c2b21276b6972fbacad6c8a1ba3ee496 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -1190,9 +1190,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
 	unsigned int alpha_rev, metal_rev;
 	unsigned int reg;
 
-	cs42l56 = devm_kzalloc(&i2c_client->dev,
-			       sizeof(struct cs42l56_private),
-			       GFP_KERNEL);
+	cs42l56 = devm_kzalloc(&i2c_client->dev, sizeof(*cs42l56), GFP_KERNEL);
 	if (cs42l56 == NULL)
 		return -ENOMEM;
 	cs42l56->dev = &i2c_client->dev;
@@ -1207,14 +1205,11 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
 	if (pdata) {
 		cs42l56->pdata = *pdata;
 	} else {
-		pdata = devm_kzalloc(&i2c_client->dev,
-				     sizeof(struct cs42l56_platform_data),
+		pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
 				     GFP_KERNEL);
-		if (!pdata) {
-			dev_err(&i2c_client->dev,
-				"could not allocate pdata\n");
+		if (!pdata)
 			return -ENOMEM;
-		}
+
 		if (i2c_client->dev.of_node) {
 			ret = cs42l56_handle_of_data(i2c_client,
 						     &cs42l56->pdata);
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index 3df2c473ab8834cd27d0f8daa732aaacc3526144..dde37e569ade934d98cb198196985acd1fbdd4be 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -1289,8 +1289,7 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
 	unsigned int reg;
 	u32 val32;
 
-	cs42l73 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs42l73_private),
-			       GFP_KERNEL);
+	cs42l73 = devm_kzalloc(&i2c_client->dev, sizeof(*cs42l73), GFP_KERNEL);
 	if (!cs42l73)
 		return -ENOMEM;
 
@@ -1304,13 +1303,11 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
 	if (pdata) {
 		cs42l73->pdata = *pdata;
 	} else {
-		pdata = devm_kzalloc(&i2c_client->dev,
-				     sizeof(struct cs42l73_platform_data),
-				GFP_KERNEL);
-		if (!pdata) {
-			dev_err(&i2c_client->dev, "could not allocate pdata\n");
+		pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
+				     GFP_KERNEL);
+		if (!pdata)
 			return -ENOMEM;
-		}
+
 		if (i2c_client->dev.of_node) {
 			if (of_property_read_u32(i2c_client->dev.of_node,
 				"chgfreq", &val32) >= 0)
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index 94c0209977d0bfd2a9a63bc0a4858d26a5a5a3a5..be2750680838cacf688e748b47f2010326117184 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -1120,9 +1120,11 @@ static int cs47l24_codec_probe(struct snd_soc_codec *codec)
 	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
 	struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
 	struct cs47l24_priv *priv = snd_soc_codec_get_drvdata(codec);
+	struct arizona *arizona = priv->core.arizona;
 	int ret;
 
-	priv->core.arizona->dapm = dapm;
+	arizona->dapm = dapm;
+	snd_soc_codec_init_regmap(codec, arizona->regmap);
 
 	ret = arizona_init_spk(codec);
 	if (ret < 0)
@@ -1175,17 +1177,9 @@ static unsigned int cs47l24_digital_vu[] = {
 	ARIZONA_DAC_DIGITAL_VOLUME_4L,
 };
 
-static struct regmap *cs47l24_get_regmap(struct device *dev)
-{
-	struct cs47l24_priv *priv = dev_get_drvdata(dev);
-
-	return priv->core.arizona->regmap;
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_cs47l24 = {
 	.probe = cs47l24_codec_probe,
 	.remove = cs47l24_codec_remove,
-	.get_regmap = cs47l24_get_regmap,
 
 	.idle_bias_off = true,
 
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index 46b1fbb66ebaaf2225f87dbe29305f3f8d0178b5..6b6f8e44369b01ea3342eaab01aa14413acf38c5 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -26,7 +26,7 @@
 
 
 struct cx20442_priv {
-	void *control_data;
+	struct tty_struct *tty;
 	struct regulator *por;
 };
 
@@ -88,17 +88,6 @@ static const struct snd_soc_dapm_route cx20442_audio_map[] = {
 	{"ADC", NULL, "Input Mixer"},
 };
 
-static unsigned int cx20442_read_reg_cache(struct snd_soc_codec *codec,
-							unsigned int reg)
-{
-	u8 *reg_cache = codec->reg_cache;
-
-	if (reg >= codec->driver->reg_cache_size)
-		return -EINVAL;
-
-	return reg_cache[reg];
-}
-
 enum v253_vls {
 	V253_VLS_NONE = 0,
 	V253_VLS_T,
@@ -123,6 +112,8 @@ enum v253_vls {
 	V253_VLS_TEST,
 };
 
+#if 0
+/* FIXME: these functions will be re-used */
 static int cx20442_pm_to_v253_vls(u8 value)
 {
 	switch (value & ~(1 << CX20442_AGC)) {
@@ -163,9 +154,9 @@ static int cx20442_write(struct snd_soc_codec *codec, unsigned int reg,
 	if (reg >= codec->driver->reg_cache_size)
 		return -EINVAL;
 
-	/* hw_write and control_data pointers required for talking to the modem
+	/* tty and write pointers required for talking to the modem
 	 * are expected to be set by the line discipline initialization code */
-	if (!codec->hw_write || !cx20442->control_data)
+	if (!cx20442->tty || !cx20442->tty->ops->write)
 		return -EIO;
 
 	old = reg_cache[reg];
@@ -194,12 +185,12 @@ static int cx20442_write(struct snd_soc_codec *codec, unsigned int reg,
 		return -ENOMEM;
 
 	dev_dbg(codec->dev, "%s: %s\n", __func__, buf);
-	if (codec->hw_write(cx20442->control_data, buf, len) != len)
+	if (cx20442->tty->ops->write(cx20442->tty, buf, len) != len)
 		return -EIO;
 
 	return 0;
 }
-
+#endif
 
 /*
  * Line discpline related code
@@ -252,8 +243,7 @@ static void v253_close(struct tty_struct *tty)
 	cx20442 = snd_soc_codec_get_drvdata(codec);
 
 	/* Prevent the codec driver from further accessing the modem */
-	codec->hw_write = NULL;
-	cx20442->control_data = NULL;
+	cx20442->tty = NULL;
 	codec->component.card->pop_time = 0;
 }
 
@@ -276,12 +266,11 @@ static void v253_receive(struct tty_struct *tty,
 
 	cx20442 = snd_soc_codec_get_drvdata(codec);
 
-	if (!cx20442->control_data) {
+	if (!cx20442->tty) {
 		/* First modem response, complete setup procedure */
 
 		/* Set up codec driver access to modem controls */
-		cx20442->control_data = tty;
-		codec->hw_write = (hw_write_t)tty->ops->write;
+		cx20442->tty = tty;
 		codec->component.card->pop_time = 1;
 	}
 }
@@ -367,10 +356,9 @@ static int cx20442_codec_probe(struct snd_soc_codec *codec)
 	cx20442->por = regulator_get(codec->dev, "POR");
 	if (IS_ERR(cx20442->por))
 		dev_warn(codec->dev, "failed to get the regulator");
-	cx20442->control_data = NULL;
+	cx20442->tty = NULL;
 
 	snd_soc_codec_set_drvdata(codec, cx20442);
-	codec->hw_write = NULL;
 	codec->component.card->pop_time = 0;
 
 	return 0;
@@ -381,8 +369,8 @@ static int cx20442_codec_remove(struct snd_soc_codec *codec)
 {
 	struct cx20442_priv *cx20442 = snd_soc_codec_get_drvdata(codec);
 
-	if (cx20442->control_data) {
-		struct tty_struct *tty = cx20442->control_data;
+	if (cx20442->tty) {
+		struct tty_struct *tty = cx20442->tty;
 		tty_hangup(tty);
 	}
 
@@ -402,11 +390,7 @@ static const struct snd_soc_codec_driver cx20442_codec_dev = {
 	.probe = 	cx20442_codec_probe,
 	.remove = 	cx20442_codec_remove,
 	.set_bias_level = cx20442_set_bias_level,
-	.reg_cache_default = &cx20442_reg,
-	.reg_cache_size = 1,
-	.reg_word_size = sizeof(u8),
-	.read = cx20442_read_reg_cache,
-	.write = cx20442_write,
+
 	.component_driver = {
 		.dapm_widgets		= cx20442_dapm_widgets,
 		.num_dapm_widgets	= ARRAY_SIZE(cx20442_dapm_widgets),
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 41d9b1da27c221272b54ad20846fb5cde3ee76ca..b2b4e90fc02adc5c25d9aa5511e360eaa0b4c76d 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -1654,10 +1654,8 @@ static struct da7213_platform_data
 	u32 fw_val32;
 
 	pdata = devm_kzalloc(codec->dev, sizeof(*pdata), GFP_KERNEL);
-	if (!pdata) {
-		dev_warn(codec->dev, "Failed to allocate memory for pdata\n");
+	if (!pdata)
 		return NULL;
-	}
 
 	if (device_property_read_u32(dev, "dlg,micbias1-lvl", &fw_val32) >= 0)
 		pdata->micbias1_lvl = da7213_of_micbias_lvl(codec, fw_val32);
@@ -1855,8 +1853,7 @@ static int da7213_i2c_probe(struct i2c_client *i2c,
 	struct da7213_priv *da7213;
 	int ret;
 
-	da7213 = devm_kzalloc(&i2c->dev, sizeof(struct da7213_priv),
-			      GFP_KERNEL);
+	da7213 = devm_kzalloc(&i2c->dev, sizeof(*da7213), GFP_KERNEL);
 	if (!da7213)
 		return -ENOMEM;
 
diff --git a/sound/soc/codecs/da7218.c b/sound/soc/codecs/da7218.c
index 56564ce90cb6b0cb156f08ae39cc46a828f32594..96c644a15b1137ec6ecc49868e5660553f7b6e1d 100644
--- a/sound/soc/codecs/da7218.c
+++ b/sound/soc/codecs/da7218.c
@@ -2455,10 +2455,8 @@ static struct da7218_pdata *da7218_of_to_pdata(struct snd_soc_codec *codec)
 	u32 of_val32;
 
 	pdata = devm_kzalloc(codec->dev, sizeof(*pdata), GFP_KERNEL);
-	if (!pdata) {
-		dev_warn(codec->dev, "Failed to allocate memory for pdata\n");
+	if (!pdata)
 		return NULL;
-	}
 
 	if (of_property_read_u32(np, "dlg,micbias1-lvl-millivolt", &of_val32) >= 0)
 		pdata->micbias1_lvl = da7218_of_micbias_lvl(codec, of_val32);
@@ -2527,8 +2525,6 @@ static struct da7218_pdata *da7218_of_to_pdata(struct snd_soc_codec *codec)
 		hpldet_pdata = devm_kzalloc(codec->dev, sizeof(*hpldet_pdata),
 					    GFP_KERNEL);
 		if (!hpldet_pdata) {
-			dev_warn(codec->dev,
-				 "Failed to allocate memory for hpldet pdata\n");
 			of_node_put(hpldet_np);
 			return pdata;
 		}
@@ -3273,8 +3269,7 @@ static int da7218_i2c_probe(struct i2c_client *i2c,
 	struct da7218_priv *da7218;
 	int ret;
 
-	da7218 = devm_kzalloc(&i2c->dev, sizeof(struct da7218_priv),
-			      GFP_KERNEL);
+	da7218 = devm_kzalloc(&i2c->dev, sizeof(*da7218), GFP_KERNEL);
 	if (!da7218)
 		return -ENOMEM;
 
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index f3b4f4dfae6ad29cd9e7b34b38b54580ddea43b6..dba6f4c5074aeb09575f94f3adab20d6d7ec3f2f 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -136,8 +136,11 @@ struct hdac_hdmi_priv {
 	struct mutex pin_mutex;
 	struct hdac_chmap chmap;
 	struct hdac_hdmi_drv_data *drv_data;
+	struct snd_soc_dai_driver *dai_drv;
 };
 
+#define hdev_to_hdmi_priv(_hdev) ((to_ehdac_device(_hdev))->private_data)
+
 static struct hdac_hdmi_pcm *
 hdac_hdmi_get_pcm_from_cvt(struct hdac_hdmi_priv *hdmi,
 			   struct hdac_hdmi_cvt *cvt)
@@ -169,7 +172,7 @@ static void hdac_hdmi_jack_report(struct hdac_hdmi_pcm *pcm,
 		 * ports.
 		 */
 		if (pcm->jack_event == 0) {
-			dev_dbg(&edev->hdac.dev,
+			dev_dbg(&edev->hdev.dev,
 					"jack report for pcm=%d\n",
 					pcm->pcm_id);
 			snd_soc_jack_report(pcm->jack, SND_JACK_AVOUT,
@@ -195,18 +198,18 @@ static void hdac_hdmi_jack_report(struct hdac_hdmi_pcm *pcm,
 /*
  * Get the no devices that can be connected to a port on the Pin widget.
  */
-static int hdac_hdmi_get_port_len(struct hdac_ext_device *hdac, hda_nid_t nid)
+static int hdac_hdmi_get_port_len(struct hdac_ext_device *edev, hda_nid_t nid)
 {
 	unsigned int caps;
 	unsigned int type, param;
 
-	caps = get_wcaps(&hdac->hdac, nid);
+	caps = get_wcaps(&edev->hdev, nid);
 	type = get_wcaps_type(caps);
 
 	if (!(caps & AC_WCAP_DIGITAL) || (type != AC_WID_PIN))
 		return 0;
 
-	param = snd_hdac_read_parm_uncached(&hdac->hdac, nid,
+	param = snd_hdac_read_parm_uncached(&edev->hdev, nid,
 					AC_PAR_DEVLIST_LEN);
 	if (param == -1)
 		return param;
@@ -219,10 +222,10 @@ static int hdac_hdmi_get_port_len(struct hdac_ext_device *hdac, hda_nid_t nid)
  * id selected on the pin. Return 0 means the first port entry
  * is selected or MST is not supported.
  */
-static int hdac_hdmi_port_select_get(struct hdac_ext_device *hdac,
+static int hdac_hdmi_port_select_get(struct hdac_ext_device *edev,
 					struct hdac_hdmi_port *port)
 {
-	return snd_hdac_codec_read(&hdac->hdac, port->pin->nid,
+	return snd_hdac_codec_read(&edev->hdev, port->pin->nid,
 				0, AC_VERB_GET_DEVICE_SEL, 0);
 }
 
@@ -230,7 +233,7 @@ static int hdac_hdmi_port_select_get(struct hdac_ext_device *hdac,
  * Sets the selected port entry for the configuring Pin widget verb.
  * returns error if port set is not equal to port get otherwise success
  */
-static int hdac_hdmi_port_select_set(struct hdac_ext_device *hdac,
+static int hdac_hdmi_port_select_set(struct hdac_ext_device *edev,
 					struct hdac_hdmi_port *port)
 {
 	int num_ports;
@@ -239,7 +242,7 @@ static int hdac_hdmi_port_select_set(struct hdac_ext_device *hdac,
 		return 0;
 
 	/* AC_PAR_DEVLIST_LEN is 0 based. */
-	num_ports = hdac_hdmi_get_port_len(hdac, port->pin->nid);
+	num_ports = hdac_hdmi_get_port_len(edev, port->pin->nid);
 
 	if (num_ports < 0)
 		return -EIO;
@@ -250,13 +253,13 @@ static int hdac_hdmi_port_select_set(struct hdac_ext_device *hdac,
 	if (num_ports + 1  < port->id)
 		return 0;
 
-	snd_hdac_codec_write(&hdac->hdac, port->pin->nid, 0,
+	snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
 			AC_VERB_SET_DEVICE_SEL, port->id);
 
-	if (port->id != hdac_hdmi_port_select_get(hdac, port))
+	if (port->id != hdac_hdmi_port_select_get(edev, port))
 		return -EIO;
 
-	dev_dbg(&hdac->hdac.dev, "Selected the port=%d\n", port->id);
+	dev_dbg(&edev->hdev.dev, "Selected the port=%d\n", port->id);
 
 	return 0;
 }
@@ -276,9 +279,9 @@ static struct hdac_hdmi_pcm *get_hdmi_pcm_from_id(struct hdac_hdmi_priv *hdmi,
 
 static inline struct hdac_ext_device *to_hda_ext_device(struct device *dev)
 {
-	struct hdac_device *hdac = dev_to_hdac_dev(dev);
+	struct hdac_device *hdev = dev_to_hdac_dev(dev);
 
-	return to_ehdac_device(hdac);
+	return to_ehdac_device(hdev);
 }
 
 static unsigned int sad_format(const u8 *sad)
@@ -321,14 +324,14 @@ format_constraint:
 }
 
 static void
-hdac_hdmi_set_dip_index(struct hdac_ext_device *hdac, hda_nid_t pin_nid,
+hdac_hdmi_set_dip_index(struct hdac_ext_device *edev, hda_nid_t pin_nid,
 				int packet_index, int byte_index)
 {
 	int val;
 
 	val = (packet_index << 5) | (byte_index & 0x1f);
 
-	snd_hdac_codec_write(&hdac->hdac, pin_nid, 0,
+	snd_hdac_codec_write(&edev->hdev, pin_nid, 0,
 				AC_VERB_SET_HDMI_DIP_INDEX, val);
 }
 
@@ -344,14 +347,14 @@ struct dp_audio_infoframe {
 	u8 LFEPBL01_LSV36_DM_INH7;
 };
 
-static int hdac_hdmi_setup_audio_infoframe(struct hdac_ext_device *hdac,
+static int hdac_hdmi_setup_audio_infoframe(struct hdac_ext_device *edev,
 		   struct hdac_hdmi_pcm *pcm, struct hdac_hdmi_port *port)
 {
 	uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
 	struct hdmi_audio_infoframe frame;
 	struct hdac_hdmi_pin *pin = port->pin;
 	struct dp_audio_infoframe dp_ai;
-	struct hdac_hdmi_priv *hdmi = hdac->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 	struct hdac_hdmi_cvt *cvt = pcm->cvt;
 	u8 *dip;
 	int ret;
@@ -360,11 +363,11 @@ static int hdac_hdmi_setup_audio_infoframe(struct hdac_ext_device *hdac,
 	u8 conn_type;
 	int channels, ca;
 
-	ca = snd_hdac_channel_allocation(&hdac->hdac, port->eld.info.spk_alloc,
+	ca = snd_hdac_channel_allocation(&edev->hdev, port->eld.info.spk_alloc,
 			pcm->channels, pcm->chmap_set, true, pcm->chmap);
 
 	channels = snd_hdac_get_active_channels(ca);
-	hdmi->chmap.ops.set_channel_count(&hdac->hdac, cvt->nid, channels);
+	hdmi->chmap.ops.set_channel_count(&edev->hdev, cvt->nid, channels);
 
 	snd_hdac_setup_channel_mapping(&hdmi->chmap, pin->nid, false, ca,
 				pcm->channels, pcm->chmap, pcm->chmap_set);
@@ -397,32 +400,32 @@ static int hdac_hdmi_setup_audio_infoframe(struct hdac_ext_device *hdac,
 		break;
 
 	default:
-		dev_err(&hdac->hdac.dev, "Invalid connection type: %d\n",
+		dev_err(&edev->hdev.dev, "Invalid connection type: %d\n",
 						conn_type);
 		return -EIO;
 	}
 
 	/* stop infoframe transmission */
-	hdac_hdmi_set_dip_index(hdac, pin->nid, 0x0, 0x0);
-	snd_hdac_codec_write(&hdac->hdac, pin->nid, 0,
+	hdac_hdmi_set_dip_index(edev, pin->nid, 0x0, 0x0);
+	snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
 			AC_VERB_SET_HDMI_DIP_XMIT, AC_DIPXMIT_DISABLE);
 
 
 	/*  Fill infoframe. Index auto-incremented */
-	hdac_hdmi_set_dip_index(hdac, pin->nid, 0x0, 0x0);
+	hdac_hdmi_set_dip_index(edev, pin->nid, 0x0, 0x0);
 	if (conn_type == DRM_ELD_CONN_TYPE_HDMI) {
 		for (i = 0; i < sizeof(buffer); i++)
-			snd_hdac_codec_write(&hdac->hdac, pin->nid, 0,
+			snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
 				AC_VERB_SET_HDMI_DIP_DATA, buffer[i]);
 	} else {
 		for (i = 0; i < sizeof(dp_ai); i++)
-			snd_hdac_codec_write(&hdac->hdac, pin->nid, 0,
+			snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
 				AC_VERB_SET_HDMI_DIP_DATA, dip[i]);
 	}
 
 	/* Start infoframe */
-	hdac_hdmi_set_dip_index(hdac, pin->nid, 0x0, 0x0);
-	snd_hdac_codec_write(&hdac->hdac, pin->nid, 0,
+	hdac_hdmi_set_dip_index(edev, pin->nid, 0x0, 0x0);
+	snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
 			AC_VERB_SET_HDMI_DIP_XMIT, AC_DIPXMIT_BEST);
 
 	return 0;
@@ -433,11 +436,11 @@ static int hdac_hdmi_set_tdm_slot(struct snd_soc_dai *dai,
 		int slots, int slot_width)
 {
 	struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 	struct hdac_hdmi_dai_port_map *dai_map;
 	struct hdac_hdmi_pcm *pcm;
 
-	dev_dbg(&edev->hdac.dev, "%s: strm_tag: %d\n", __func__, tx_mask);
+	dev_dbg(&edev->hdev.dev, "%s: strm_tag: %d\n", __func__, tx_mask);
 
 	dai_map = &hdmi->dai_map[dai->id];
 
@@ -452,8 +455,8 @@ static int hdac_hdmi_set_tdm_slot(struct snd_soc_dai *dai,
 static int hdac_hdmi_set_hw_params(struct snd_pcm_substream *substream,
 	struct snd_pcm_hw_params *hparams, struct snd_soc_dai *dai)
 {
-	struct hdac_ext_device *hdac = snd_soc_dai_get_drvdata(dai);
-	struct hdac_hdmi_priv *hdmi = hdac->private_data;
+	struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 	struct hdac_hdmi_dai_port_map *dai_map;
 	struct hdac_hdmi_port *port;
 	struct hdac_hdmi_pcm *pcm;
@@ -466,7 +469,7 @@ static int hdac_hdmi_set_hw_params(struct snd_pcm_substream *substream,
 		return -ENODEV;
 
 	if ((!port->eld.monitor_present) || (!port->eld.eld_valid)) {
-		dev_err(&hdac->hdac.dev,
+		dev_err(&edev->hdev.dev,
 			"device is not configured for this pin:port%d:%d\n",
 					port->pin->nid, port->id);
 		return -ENODEV;
@@ -486,28 +489,28 @@ static int hdac_hdmi_set_hw_params(struct snd_pcm_substream *substream,
 	return 0;
 }
 
-static int hdac_hdmi_query_port_connlist(struct hdac_ext_device *hdac,
+static int hdac_hdmi_query_port_connlist(struct hdac_ext_device *edev,
 					struct hdac_hdmi_pin *pin,
 					struct hdac_hdmi_port *port)
 {
-	if (!(get_wcaps(&hdac->hdac, pin->nid) & AC_WCAP_CONN_LIST)) {
-		dev_warn(&hdac->hdac.dev,
+	if (!(get_wcaps(&edev->hdev, pin->nid) & AC_WCAP_CONN_LIST)) {
+		dev_warn(&edev->hdev.dev,
 			"HDMI: pin %d wcaps %#x does not support connection list\n",
-			pin->nid, get_wcaps(&hdac->hdac, pin->nid));
+			pin->nid, get_wcaps(&edev->hdev, pin->nid));
 		return -EINVAL;
 	}
 
-	if (hdac_hdmi_port_select_set(hdac, port) < 0)
+	if (hdac_hdmi_port_select_set(edev, port) < 0)
 		return -EIO;
 
-	port->num_mux_nids = snd_hdac_get_connections(&hdac->hdac, pin->nid,
+	port->num_mux_nids = snd_hdac_get_connections(&edev->hdev, pin->nid,
 			port->mux_nids, HDA_MAX_CONNECTIONS);
 	if (port->num_mux_nids == 0)
-		dev_warn(&hdac->hdac.dev,
+		dev_warn(&edev->hdev.dev,
 			"No connections found for pin:port %d:%d\n",
 						pin->nid, port->id);
 
-	dev_dbg(&hdac->hdac.dev, "num_mux_nids %d for pin:port %d:%d\n",
+	dev_dbg(&edev->hdev.dev, "num_mux_nids %d for pin:port %d:%d\n",
 			port->num_mux_nids, pin->nid, port->id);
 
 	return port->num_mux_nids;
@@ -565,8 +568,8 @@ static struct hdac_hdmi_port *hdac_hdmi_get_port_from_cvt(
 static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
 			struct snd_soc_dai *dai)
 {
-	struct hdac_ext_device *hdac = snd_soc_dai_get_drvdata(dai);
-	struct hdac_hdmi_priv *hdmi = hdac->private_data;
+	struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 	struct hdac_hdmi_dai_port_map *dai_map;
 	struct hdac_hdmi_cvt *cvt;
 	struct hdac_hdmi_port *port;
@@ -575,7 +578,7 @@ static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
 	dai_map = &hdmi->dai_map[dai->id];
 
 	cvt = dai_map->cvt;
-	port = hdac_hdmi_get_port_from_cvt(hdac, hdmi, cvt);
+	port = hdac_hdmi_get_port_from_cvt(edev, hdmi, cvt);
 
 	/*
 	 * To make PA and other userland happy.
@@ -586,7 +589,7 @@ static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
 	if ((!port->eld.monitor_present) ||
 			(!port->eld.eld_valid)) {
 
-		dev_warn(&hdac->hdac.dev,
+		dev_warn(&edev->hdev.dev,
 			"Failed: present?:%d ELD valid?:%d pin:port: %d:%d\n",
 			port->eld.monitor_present, port->eld.eld_valid,
 			port->pin->nid, port->id);
@@ -608,8 +611,8 @@ static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
 static void hdac_hdmi_pcm_close(struct snd_pcm_substream *substream,
 		struct snd_soc_dai *dai)
 {
-	struct hdac_ext_device *hdac = snd_soc_dai_get_drvdata(dai);
-	struct hdac_hdmi_priv *hdmi = hdac->private_data;
+	struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 	struct hdac_hdmi_dai_port_map *dai_map;
 	struct hdac_hdmi_pcm *pcm;
 
@@ -630,14 +633,13 @@ static void hdac_hdmi_pcm_close(struct snd_pcm_substream *substream,
 }
 
 static int
-hdac_hdmi_query_cvt_params(struct hdac_device *hdac, struct hdac_hdmi_cvt *cvt)
+hdac_hdmi_query_cvt_params(struct hdac_device *hdev, struct hdac_hdmi_cvt *cvt)
 {
 	unsigned int chans;
-	struct hdac_ext_device *edev = to_ehdac_device(hdac);
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
 	int err;
 
-	chans = get_wcaps(hdac, cvt->nid);
+	chans = get_wcaps(hdev, cvt->nid);
 	chans = get_wcaps_channels(chans);
 
 	cvt->params.channels_min = 2;
@@ -646,12 +648,12 @@ hdac_hdmi_query_cvt_params(struct hdac_device *hdac, struct hdac_hdmi_cvt *cvt)
 	if (chans > hdmi->chmap.channels_max)
 		hdmi->chmap.channels_max = chans;
 
-	err = snd_hdac_query_supported_pcm(hdac, cvt->nid,
+	err = snd_hdac_query_supported_pcm(hdev, cvt->nid,
 			&cvt->params.rates,
 			&cvt->params.formats,
 			&cvt->params.maxbps);
 	if (err < 0)
-		dev_err(&hdac->dev,
+		dev_err(&hdev->dev,
 			"Failed to query pcm params for nid %d: %d\n",
 			cvt->nid, err);
 
@@ -696,7 +698,7 @@ static void hdac_hdmi_fill_route(struct snd_soc_dapm_route *route,
 static struct hdac_hdmi_pcm *hdac_hdmi_get_pcm(struct hdac_ext_device *edev,
 					struct hdac_hdmi_port *port)
 {
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 	struct hdac_hdmi_pcm *pcm = NULL;
 	struct hdac_hdmi_port *p;
 
@@ -716,9 +718,9 @@ static struct hdac_hdmi_pcm *hdac_hdmi_get_pcm(struct hdac_ext_device *edev,
 static void hdac_hdmi_set_power_state(struct hdac_ext_device *edev,
 			     hda_nid_t nid, unsigned int pwr_state)
 {
-	if (get_wcaps(&edev->hdac, nid) & AC_WCAP_POWER) {
-		if (!snd_hdac_check_power_state(&edev->hdac, nid, pwr_state))
-			snd_hdac_codec_write(&edev->hdac, nid, 0,
+	if (get_wcaps(&edev->hdev, nid) & AC_WCAP_POWER) {
+		if (!snd_hdac_check_power_state(&edev->hdev, nid, pwr_state))
+			snd_hdac_codec_write(&edev->hdev, nid, 0,
 				AC_VERB_SET_POWER_STATE, pwr_state);
 	}
 }
@@ -726,8 +728,8 @@ static void hdac_hdmi_set_power_state(struct hdac_ext_device *edev,
 static void hdac_hdmi_set_amp(struct hdac_ext_device *edev,
 				   hda_nid_t nid, int val)
 {
-	if (get_wcaps(&edev->hdac, nid) & AC_WCAP_OUT_AMP)
-		snd_hdac_codec_write(&edev->hdac, nid, 0,
+	if (get_wcaps(&edev->hdev, nid) & AC_WCAP_OUT_AMP)
+		snd_hdac_codec_write(&edev->hdev, nid, 0,
 					AC_VERB_SET_AMP_GAIN_MUTE, val);
 }
 
@@ -739,7 +741,7 @@ static int hdac_hdmi_pin_output_widget_event(struct snd_soc_dapm_widget *w,
 	struct hdac_ext_device *edev = to_hda_ext_device(w->dapm->dev);
 	struct hdac_hdmi_pcm *pcm;
 
-	dev_dbg(&edev->hdac.dev, "%s: widget: %s event: %x\n",
+	dev_dbg(&edev->hdev.dev, "%s: widget: %s event: %x\n",
 			__func__, w->name, event);
 
 	pcm = hdac_hdmi_get_pcm(edev, port);
@@ -755,7 +757,7 @@ static int hdac_hdmi_pin_output_widget_event(struct snd_soc_dapm_widget *w,
 		hdac_hdmi_set_power_state(edev, port->pin->nid, AC_PWRST_D0);
 
 		/* Enable out path for this pin widget */
-		snd_hdac_codec_write(&edev->hdac, port->pin->nid, 0,
+		snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
 				AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
 
 		hdac_hdmi_set_amp(edev, port->pin->nid, AMP_OUT_UNMUTE);
@@ -766,7 +768,7 @@ static int hdac_hdmi_pin_output_widget_event(struct snd_soc_dapm_widget *w,
 		hdac_hdmi_set_amp(edev, port->pin->nid, AMP_OUT_MUTE);
 
 		/* Disable out path for this pin widget */
-		snd_hdac_codec_write(&edev->hdac, port->pin->nid, 0,
+		snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
 				AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
 
 		hdac_hdmi_set_power_state(edev, port->pin->nid, AC_PWRST_D3);
@@ -782,10 +784,10 @@ static int hdac_hdmi_cvt_output_widget_event(struct snd_soc_dapm_widget *w,
 {
 	struct hdac_hdmi_cvt *cvt = w->priv;
 	struct hdac_ext_device *edev = to_hda_ext_device(w->dapm->dev);
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 	struct hdac_hdmi_pcm *pcm;
 
-	dev_dbg(&edev->hdac.dev, "%s: widget: %s event: %x\n",
+	dev_dbg(&edev->hdev.dev, "%s: widget: %s event: %x\n",
 			__func__, w->name, event);
 
 	pcm = hdac_hdmi_get_pcm_from_cvt(hdmi, cvt);
@@ -797,23 +799,23 @@ static int hdac_hdmi_cvt_output_widget_event(struct snd_soc_dapm_widget *w,
 		hdac_hdmi_set_power_state(edev, cvt->nid, AC_PWRST_D0);
 
 		/* Enable transmission */
-		snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+		snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
 			AC_VERB_SET_DIGI_CONVERT_1, 1);
 
 		/* Category Code (CC) to zero */
-		snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+		snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
 			AC_VERB_SET_DIGI_CONVERT_2, 0);
 
-		snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+		snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
 				AC_VERB_SET_CHANNEL_STREAMID, pcm->stream_tag);
-		snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+		snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
 				AC_VERB_SET_STREAM_FORMAT, pcm->format);
 		break;
 
 	case SND_SOC_DAPM_POST_PMD:
-		snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+		snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
 				AC_VERB_SET_CHANNEL_STREAMID, 0);
-		snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+		snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
 				AC_VERB_SET_STREAM_FORMAT, 0);
 
 		hdac_hdmi_set_power_state(edev, cvt->nid, AC_PWRST_D3);
@@ -831,7 +833,7 @@ static int hdac_hdmi_pin_mux_widget_event(struct snd_soc_dapm_widget *w,
 	struct hdac_ext_device *edev = to_hda_ext_device(w->dapm->dev);
 	int mux_idx;
 
-	dev_dbg(&edev->hdac.dev, "%s: widget: %s event: %x\n",
+	dev_dbg(&edev->hdev.dev, "%s: widget: %s event: %x\n",
 			__func__, w->name, event);
 
 	if (!kc)
@@ -844,7 +846,7 @@ static int hdac_hdmi_pin_mux_widget_event(struct snd_soc_dapm_widget *w,
 		return -EIO;
 
 	if (mux_idx > 0) {
-		snd_hdac_codec_write(&edev->hdac, port->pin->nid, 0,
+		snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
 			AC_VERB_SET_CONNECT_SEL, (mux_idx - 1));
 	}
 
@@ -864,7 +866,7 @@ static int hdac_hdmi_set_pin_port_mux(struct snd_kcontrol *kcontrol,
 	struct snd_soc_dapm_context *dapm = w->dapm;
 	struct hdac_hdmi_port *port = w->priv;
 	struct hdac_ext_device *edev = to_hda_ext_device(dapm->dev);
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 	struct hdac_hdmi_pcm *pcm = NULL;
 	const char *cvt_name =  e->texts[ucontrol->value.enumerated.item[0]];
 
@@ -922,7 +924,7 @@ static int hdac_hdmi_create_pin_port_muxs(struct hdac_ext_device *edev,
 				struct snd_soc_dapm_widget *widget,
 				const char *widget_name)
 {
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 	struct hdac_hdmi_pin *pin = port->pin;
 	struct snd_kcontrol_new *kc;
 	struct hdac_hdmi_cvt *cvt;
@@ -934,17 +936,17 @@ static int hdac_hdmi_create_pin_port_muxs(struct hdac_ext_device *edev,
 	int i = 0;
 	int num_items = hdmi->num_cvt + 1;
 
-	kc = devm_kzalloc(&edev->hdac.dev, sizeof(*kc), GFP_KERNEL);
+	kc = devm_kzalloc(&edev->hdev.dev, sizeof(*kc), GFP_KERNEL);
 	if (!kc)
 		return -ENOMEM;
 
-	se = devm_kzalloc(&edev->hdac.dev, sizeof(*se), GFP_KERNEL);
+	se = devm_kzalloc(&edev->hdev.dev, sizeof(*se), GFP_KERNEL);
 	if (!se)
 		return -ENOMEM;
 
 	snprintf(kc_name, NAME_SIZE, "Pin %d port %d Input",
 						pin->nid, port->id);
-	kc->name = devm_kstrdup(&edev->hdac.dev, kc_name, GFP_KERNEL);
+	kc->name = devm_kstrdup(&edev->hdev.dev, kc_name, GFP_KERNEL);
 	if (!kc->name)
 		return -ENOMEM;
 
@@ -962,24 +964,24 @@ static int hdac_hdmi_create_pin_port_muxs(struct hdac_ext_device *edev,
 	se->mask = roundup_pow_of_two(se->items) - 1;
 
 	sprintf(mux_items, "NONE");
-	items[i] = devm_kstrdup(&edev->hdac.dev, mux_items, GFP_KERNEL);
+	items[i] = devm_kstrdup(&edev->hdev.dev, mux_items, GFP_KERNEL);
 	if (!items[i])
 		return -ENOMEM;
 
 	list_for_each_entry(cvt, &hdmi->cvt_list, head) {
 		i++;
 		sprintf(mux_items, "cvt %d", cvt->nid);
-		items[i] = devm_kstrdup(&edev->hdac.dev, mux_items, GFP_KERNEL);
+		items[i] = devm_kstrdup(&edev->hdev.dev, mux_items, GFP_KERNEL);
 		if (!items[i])
 			return -ENOMEM;
 	}
 
-	se->texts = devm_kmemdup(&edev->hdac.dev, items,
+	se->texts = devm_kmemdup(&edev->hdev.dev, items,
 			(num_items  * sizeof(char *)), GFP_KERNEL);
 	if (!se->texts)
 		return -ENOMEM;
 
-	return hdac_hdmi_fill_widget_info(&edev->hdac.dev, widget,
+	return hdac_hdmi_fill_widget_info(&edev->hdev.dev, widget,
 			snd_soc_dapm_mux, port, widget_name, NULL, kc, 1,
 			hdac_hdmi_pin_mux_widget_event,
 			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_REG);
@@ -990,7 +992,7 @@ static void hdac_hdmi_add_pinmux_cvt_route(struct hdac_ext_device *edev,
 			struct snd_soc_dapm_widget *widgets,
 			struct snd_soc_dapm_route *route, int rindex)
 {
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 	const struct snd_kcontrol_new *kc;
 	struct soc_enum *se;
 	int mux_index = hdmi->num_cvt + hdmi->num_ports;
@@ -1033,8 +1035,8 @@ static int create_fill_widget_route_map(struct snd_soc_dapm_context *dapm)
 	struct snd_soc_dapm_widget *widgets;
 	struct snd_soc_dapm_route *route;
 	struct hdac_ext_device *edev = to_hda_ext_device(dapm->dev);
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
-	struct snd_soc_dai_driver *dai_drv = dapm->component->dai_drv;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+	struct snd_soc_dai_driver *dai_drv = hdmi->dai_drv;
 	char widget_name[NAME_SIZE];
 	struct hdac_hdmi_cvt *cvt;
 	struct hdac_hdmi_pin *pin;
@@ -1134,7 +1136,7 @@ static int create_fill_widget_route_map(struct snd_soc_dapm_context *dapm)
 
 static int hdac_hdmi_init_dai_map(struct hdac_ext_device *edev)
 {
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 	struct hdac_hdmi_dai_port_map *dai_map;
 	struct hdac_hdmi_cvt *cvt;
 	int dai_id = 0;
@@ -1150,7 +1152,7 @@ static int hdac_hdmi_init_dai_map(struct hdac_ext_device *edev)
 		dai_id++;
 
 		if (dai_id == HDA_MAX_CVTS) {
-			dev_warn(&edev->hdac.dev,
+			dev_warn(&edev->hdev.dev,
 				"Max dais supported: %d\n", dai_id);
 			break;
 		}
@@ -1161,7 +1163,7 @@ static int hdac_hdmi_init_dai_map(struct hdac_ext_device *edev)
 
 static int hdac_hdmi_add_cvt(struct hdac_ext_device *edev, hda_nid_t nid)
 {
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 	struct hdac_hdmi_cvt *cvt;
 	char name[NAME_SIZE];
 
@@ -1176,7 +1178,7 @@ static int hdac_hdmi_add_cvt(struct hdac_ext_device *edev, hda_nid_t nid)
 	list_add_tail(&cvt->head, &hdmi->cvt_list);
 	hdmi->num_cvt++;
 
-	return hdac_hdmi_query_cvt_params(&edev->hdac, cvt);
+	return hdac_hdmi_query_cvt_params(&edev->hdev, cvt);
 }
 
 static int hdac_hdmi_parse_eld(struct hdac_ext_device *edev,
@@ -1188,7 +1190,7 @@ static int hdac_hdmi_parse_eld(struct hdac_ext_device *edev,
 						>> DRM_ELD_VER_SHIFT;
 
 	if (ver != ELD_VER_CEA_861D && ver != ELD_VER_PARTIAL) {
-		dev_err(&edev->hdac.dev, "HDMI: Unknown ELD version %d\n", ver);
+		dev_err(&edev->hdev.dev, "HDMI: Unknown ELD version %d\n", ver);
 		return -EINVAL;
 	}
 
@@ -1196,7 +1198,7 @@ static int hdac_hdmi_parse_eld(struct hdac_ext_device *edev,
 		DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
 
 	if (mnl > ELD_MAX_MNL) {
-		dev_err(&edev->hdac.dev, "HDMI: MNL Invalid %d\n", mnl);
+		dev_err(&edev->hdev.dev, "HDMI: MNL Invalid %d\n", mnl);
 		return -EINVAL;
 	}
 
@@ -1209,7 +1211,7 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin,
 				    struct hdac_hdmi_port *port)
 {
 	struct hdac_ext_device *edev = pin->edev;
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 	struct hdac_hdmi_pcm *pcm;
 	int size = 0;
 	int port_id = -1;
@@ -1227,7 +1229,7 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin,
 	if (pin->mst_capable)
 		port_id = port->id;
 
-	size = snd_hdac_acomp_get_eld(&edev->hdac, pin->nid, port_id,
+	size = snd_hdac_acomp_get_eld(&edev->hdev, pin->nid, port_id,
 				&port->eld.monitor_present,
 				port->eld.eld_buffer,
 				ELD_MAX_SIZE);
@@ -1250,7 +1252,7 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin,
 
 	if (!port->eld.monitor_present || !port->eld.eld_valid) {
 
-		dev_err(&edev->hdac.dev, "%s: disconnect for pin:port %d:%d\n",
+		dev_err(&edev->hdev.dev, "%s: disconnect for pin:port %d:%d\n",
 						__func__, pin->nid, port->id);
 
 		/*
@@ -1304,7 +1306,7 @@ static int hdac_hdmi_add_ports(struct hdac_hdmi_priv *hdmi,
 
 static int hdac_hdmi_add_pin(struct hdac_ext_device *edev, hda_nid_t nid)
 {
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 	struct hdac_hdmi_pin *pin;
 	int ret;
 
@@ -1333,40 +1335,38 @@ static int hdac_hdmi_add_pin(struct hdac_ext_device *edev, hda_nid_t nid)
 #define INTEL_EN_DP12			0x02 /* enable DP 1.2 features */
 #define INTEL_EN_ALL_PIN_CVTS	0x01 /* enable 2nd & 3rd pins and convertors */
 
-static void hdac_hdmi_skl_enable_all_pins(struct hdac_device *hdac)
+static void hdac_hdmi_skl_enable_all_pins(struct hdac_device *hdev)
 {
 	unsigned int vendor_param;
-	struct hdac_ext_device *edev = to_ehdac_device(hdac);
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
 	unsigned int vendor_nid = hdmi->drv_data->vendor_nid;
 
-	vendor_param = snd_hdac_codec_read(hdac, vendor_nid, 0,
+	vendor_param = snd_hdac_codec_read(hdev, vendor_nid, 0,
 				INTEL_GET_VENDOR_VERB, 0);
 	if (vendor_param == -1 || vendor_param & INTEL_EN_ALL_PIN_CVTS)
 		return;
 
 	vendor_param |= INTEL_EN_ALL_PIN_CVTS;
-	vendor_param = snd_hdac_codec_read(hdac, vendor_nid, 0,
+	vendor_param = snd_hdac_codec_read(hdev, vendor_nid, 0,
 				INTEL_SET_VENDOR_VERB, vendor_param);
 	if (vendor_param == -1)
 		return;
 }
 
-static void hdac_hdmi_skl_enable_dp12(struct hdac_device *hdac)
+static void hdac_hdmi_skl_enable_dp12(struct hdac_device *hdev)
 {
 	unsigned int vendor_param;
-	struct hdac_ext_device *edev = to_ehdac_device(hdac);
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
 	unsigned int vendor_nid = hdmi->drv_data->vendor_nid;
 
-	vendor_param = snd_hdac_codec_read(hdac, vendor_nid, 0,
+	vendor_param = snd_hdac_codec_read(hdev, vendor_nid, 0,
 				INTEL_GET_VENDOR_VERB, 0);
 	if (vendor_param == -1 || vendor_param & INTEL_EN_DP12)
 		return;
 
 	/* enable DP1.2 mode */
 	vendor_param |= INTEL_EN_DP12;
-	vendor_param = snd_hdac_codec_read(hdac, vendor_nid, 0,
+	vendor_param = snd_hdac_codec_read(hdev, vendor_nid, 0,
 				INTEL_SET_VENDOR_VERB, vendor_param);
 	if (vendor_param == -1)
 		return;
@@ -1384,7 +1384,7 @@ static const struct snd_soc_dai_ops hdmi_dai_ops = {
  * Each converter can support a stream independently. So a dai is created
  * based on the number of converter queried.
  */
-static int hdac_hdmi_create_dais(struct hdac_device *hdac,
+static int hdac_hdmi_create_dais(struct hdac_device *hdev,
 		struct snd_soc_dai_driver **dais,
 		struct hdac_hdmi_priv *hdmi, int num_dais)
 {
@@ -1397,20 +1397,20 @@ static int hdac_hdmi_create_dais(struct hdac_device *hdac,
 	u64 formats;
 	int ret;
 
-	hdmi_dais = devm_kzalloc(&hdac->dev,
+	hdmi_dais = devm_kzalloc(&hdev->dev,
 			(sizeof(*hdmi_dais) * num_dais),
 			GFP_KERNEL);
 	if (!hdmi_dais)
 		return -ENOMEM;
 
 	list_for_each_entry(cvt, &hdmi->cvt_list, head) {
-		ret = snd_hdac_query_supported_pcm(hdac, cvt->nid,
+		ret = snd_hdac_query_supported_pcm(hdev, cvt->nid,
 					&rates,	&formats, &bps);
 		if (ret)
 			return ret;
 
 		sprintf(dai_name, "intel-hdmi-hifi%d", i+1);
-		hdmi_dais[i].name = devm_kstrdup(&hdac->dev,
+		hdmi_dais[i].name = devm_kstrdup(&hdev->dev,
 					dai_name, GFP_KERNEL);
 
 		if (!hdmi_dais[i].name)
@@ -1418,7 +1418,7 @@ static int hdac_hdmi_create_dais(struct hdac_device *hdac,
 
 		snprintf(name, sizeof(name), "hifi%d", i+1);
 		hdmi_dais[i].playback.stream_name =
-				devm_kstrdup(&hdac->dev, name, GFP_KERNEL);
+				devm_kstrdup(&hdev->dev, name, GFP_KERNEL);
 		if (!hdmi_dais[i].playback.stream_name)
 			return -ENOMEM;
 
@@ -1438,6 +1438,7 @@ static int hdac_hdmi_create_dais(struct hdac_device *hdac,
 	}
 
 	*dais = hdmi_dais;
+	hdmi->dai_drv = hdmi_dais;
 
 	return 0;
 }
@@ -1451,29 +1452,26 @@ static int hdac_hdmi_parse_and_map_nid(struct hdac_ext_device *edev,
 {
 	hda_nid_t nid;
 	int i, num_nodes;
-	struct hdac_device *hdac = &edev->hdac;
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
 	struct hdac_hdmi_cvt *temp_cvt, *cvt_next;
 	struct hdac_hdmi_pin *temp_pin, *pin_next;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+	struct hdac_device *hdev = &edev->hdev;
 	int ret;
 
-	hdac_hdmi_skl_enable_all_pins(hdac);
-	hdac_hdmi_skl_enable_dp12(hdac);
+	hdac_hdmi_skl_enable_all_pins(hdev);
+	hdac_hdmi_skl_enable_dp12(hdev);
 
-	num_nodes = snd_hdac_get_sub_nodes(hdac, hdac->afg, &nid);
+	num_nodes = snd_hdac_get_sub_nodes(hdev, hdev->afg, &nid);
 	if (!nid || num_nodes <= 0) {
-		dev_warn(&hdac->dev, "HDMI: failed to get afg sub nodes\n");
+		dev_warn(&hdev->dev, "HDMI: failed to get afg sub nodes\n");
 		return -EINVAL;
 	}
 
-	hdac->num_nodes = num_nodes;
-	hdac->start_nid = nid;
-
-	for (i = 0; i < hdac->num_nodes; i++, nid++) {
+	for (i = 0; i < num_nodes; i++, nid++) {
 		unsigned int caps;
 		unsigned int type;
 
-		caps = get_wcaps(hdac, nid);
+		caps = get_wcaps(hdev, nid);
 		type = get_wcaps_type(caps);
 
 		if (!(caps & AC_WCAP_DIGITAL))
@@ -1495,16 +1493,14 @@ static int hdac_hdmi_parse_and_map_nid(struct hdac_ext_device *edev,
 		}
 	}
 
-	hdac->end_nid = nid;
-
 	if (!hdmi->num_pin || !hdmi->num_cvt) {
 		ret = -EIO;
 		goto free_widgets;
 	}
 
-	ret = hdac_hdmi_create_dais(hdac, dais, hdmi, hdmi->num_cvt);
+	ret = hdac_hdmi_create_dais(hdev, dais, hdmi, hdmi->num_cvt);
 	if (ret) {
-		dev_err(&hdac->dev, "Failed to create dais with err: %d\n",
+		dev_err(&hdev->dev, "Failed to create dais with err: %d\n",
 							ret);
 		goto free_widgets;
 	}
@@ -1537,7 +1533,7 @@ free_widgets:
 static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
 {
 	struct hdac_ext_device *edev = aptr;
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 	struct hdac_hdmi_pin *pin = NULL;
 	struct hdac_hdmi_port *hport = NULL;
 	struct snd_soc_codec *codec = edev->scodec;
@@ -1546,7 +1542,7 @@ static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
 	/* Don't know how this mapping is derived */
 	hda_nid_t pin_nid = port + 0x04;
 
-	dev_dbg(&edev->hdac.dev, "%s: for pin:%d port=%d\n", __func__,
+	dev_dbg(&edev->hdev.dev, "%s: for pin:%d port=%d\n", __func__,
 							pin_nid, pipe);
 
 	/*
@@ -1559,7 +1555,7 @@ static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
 			SNDRV_CTL_POWER_D0)
 		return;
 
-	if (atomic_read(&edev->hdac.in_pm))
+	if (atomic_read(&edev->hdev.in_pm))
 		return;
 
 	list_for_each_entry(pin, &hdmi->pin_list, head) {
@@ -1614,7 +1610,7 @@ static int create_fill_jack_kcontrols(struct snd_soc_card *card,
 	char *name;
 	int i = 0, j;
 	struct snd_soc_codec *codec = edev->scodec;
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 
 	kc = devm_kcalloc(codec->dev, hdmi->num_ports,
 				sizeof(*kc), GFP_KERNEL);
@@ -1652,7 +1648,7 @@ int hdac_hdmi_jack_port_init(struct snd_soc_codec *codec,
 			struct snd_soc_dapm_context *dapm)
 {
 	struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 	struct hdac_hdmi_pin *pin;
 	struct snd_soc_dapm_widget *widgets;
 	struct snd_soc_dapm_route *route;
@@ -1728,7 +1724,7 @@ int hdac_hdmi_jack_init(struct snd_soc_dai *dai, int device,
 {
 	struct snd_soc_codec *codec = dai->codec;
 	struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 	struct hdac_hdmi_pcm *pcm;
 	struct snd_pcm *snd_pcm;
 	int err;
@@ -1750,7 +1746,7 @@ int hdac_hdmi_jack_init(struct snd_soc_dai *dai, int device,
 	if (snd_pcm) {
 		err = snd_hdac_add_chmap_ctls(snd_pcm, device, &hdmi->chmap);
 		if (err < 0) {
-			dev_err(&edev->hdac.dev,
+			dev_err(&edev->hdev.dev,
 				"chmap control add failed with err: %d for pcm: %d\n",
 				err, device);
 			kfree(pcm);
@@ -1791,7 +1787,7 @@ static void hdac_hdmi_present_sense_all_pins(struct hdac_ext_device *edev,
 static int hdmi_codec_probe(struct snd_soc_codec *codec)
 {
 	struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 	struct snd_soc_dapm_context *dapm =
 		snd_soc_component_get_dapm(&codec->component);
 	struct hdac_ext_link *hlink = NULL;
@@ -1803,9 +1799,9 @@ static int hdmi_codec_probe(struct snd_soc_codec *codec)
 	 * hold the ref while we probe, also no need to drop the ref on
 	 * exit, we call pm_runtime_suspend() so that will do for us
 	 */
-	hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+	hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdev.dev));
 	if (!hlink) {
-		dev_err(&edev->hdac.dev, "hdac link not found\n");
+		dev_err(&edev->hdev.dev, "hdac link not found\n");
 		return -EIO;
 	}
 
@@ -1818,7 +1814,7 @@ static int hdmi_codec_probe(struct snd_soc_codec *codec)
 	aops.audio_ptr = edev;
 	ret = snd_hdac_i915_register_notifier(&aops);
 	if (ret < 0) {
-		dev_err(&edev->hdac.dev, "notifier register failed: err: %d\n",
+		dev_err(&edev->hdev.dev, "notifier register failed: err: %d\n",
 				ret);
 		return ret;
 	}
@@ -1831,9 +1827,9 @@ static int hdmi_codec_probe(struct snd_soc_codec *codec)
 	 * hdac_device core already sets the state to active and calls
 	 * get_noresume. So enable runtime and set the device to suspend.
 	 */
-	pm_runtime_enable(&edev->hdac.dev);
-	pm_runtime_put(&edev->hdac.dev);
-	pm_runtime_suspend(&edev->hdac.dev);
+	pm_runtime_enable(&edev->hdev.dev);
+	pm_runtime_put(&edev->hdev.dev);
+	pm_runtime_suspend(&edev->hdev.dev);
 
 	return 0;
 }
@@ -1842,7 +1838,7 @@ static int hdmi_codec_remove(struct snd_soc_codec *codec)
 {
 	struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
 
-	pm_runtime_disable(&edev->hdac.dev);
+	pm_runtime_disable(&edev->hdev.dev);
 	return 0;
 }
 
@@ -1850,9 +1846,9 @@ static int hdmi_codec_remove(struct snd_soc_codec *codec)
 static int hdmi_codec_prepare(struct device *dev)
 {
 	struct hdac_ext_device *edev = to_hda_ext_device(dev);
-	struct hdac_device *hdac = &edev->hdac;
+	struct hdac_device *hdev = &edev->hdev;
 
-	pm_runtime_get_sync(&edev->hdac.dev);
+	pm_runtime_get_sync(&edev->hdev.dev);
 
 	/*
 	 * Power down afg.
@@ -1861,7 +1857,7 @@ static int hdmi_codec_prepare(struct device *dev)
 	 * is received. So setting power state is ensured without using loop
 	 * to read the state.
 	 */
-	snd_hdac_codec_read(hdac, hdac->afg, 0,	AC_VERB_SET_POWER_STATE,
+	snd_hdac_codec_read(hdev, hdev->afg, 0,	AC_VERB_SET_POWER_STATE,
 							AC_PWRST_D3);
 
 	return 0;
@@ -1870,15 +1866,15 @@ static int hdmi_codec_prepare(struct device *dev)
 static void hdmi_codec_complete(struct device *dev)
 {
 	struct hdac_ext_device *edev = to_hda_ext_device(dev);
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
-	struct hdac_device *hdac = &edev->hdac;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+	struct hdac_device *hdev = &edev->hdev;
 
 	/* Power up afg */
-	snd_hdac_codec_read(hdac, hdac->afg, 0,	AC_VERB_SET_POWER_STATE,
+	snd_hdac_codec_read(hdev, hdev->afg, 0,	AC_VERB_SET_POWER_STATE,
 							AC_PWRST_D0);
 
-	hdac_hdmi_skl_enable_all_pins(&edev->hdac);
-	hdac_hdmi_skl_enable_dp12(&edev->hdac);
+	hdac_hdmi_skl_enable_all_pins(&edev->hdev);
+	hdac_hdmi_skl_enable_dp12(&edev->hdev);
 
 	/*
 	 * As the ELD notify callback request is not entertained while the
@@ -1888,7 +1884,7 @@ static void hdmi_codec_complete(struct device *dev)
 	 */
 	hdac_hdmi_present_sense_all_pins(edev, hdmi, false);
 
-	pm_runtime_put_sync(&edev->hdac.dev);
+	pm_runtime_put_sync(&edev->hdev.dev);
 }
 #else
 #define hdmi_codec_prepare NULL
@@ -1901,21 +1897,20 @@ static const struct snd_soc_codec_driver hdmi_hda_codec = {
 	.idle_bias_off	= true,
 };
 
-static void hdac_hdmi_get_chmap(struct hdac_device *hdac, int pcm_idx,
+static void hdac_hdmi_get_chmap(struct hdac_device *hdev, int pcm_idx,
 					unsigned char *chmap)
 {
-	struct hdac_ext_device *edev = to_ehdac_device(hdac);
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
 	struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
 
 	memcpy(chmap, pcm->chmap, ARRAY_SIZE(pcm->chmap));
 }
 
-static void hdac_hdmi_set_chmap(struct hdac_device *hdac, int pcm_idx,
+static void hdac_hdmi_set_chmap(struct hdac_device *hdev, int pcm_idx,
 				unsigned char *chmap, int prepared)
 {
-	struct hdac_ext_device *edev = to_ehdac_device(hdac);
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_ext_device *edev = to_ehdac_device(hdev);
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
 	struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
 	struct hdac_hdmi_port *port;
 
@@ -1934,10 +1929,9 @@ static void hdac_hdmi_set_chmap(struct hdac_device *hdac, int pcm_idx,
 	mutex_unlock(&pcm->lock);
 }
 
-static bool is_hdac_hdmi_pcm_attached(struct hdac_device *hdac, int pcm_idx)
+static bool is_hdac_hdmi_pcm_attached(struct hdac_device *hdev, int pcm_idx)
 {
-	struct hdac_ext_device *edev = to_ehdac_device(hdac);
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
 	struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
 
 	if (!pcm)
@@ -1949,10 +1943,9 @@ static bool is_hdac_hdmi_pcm_attached(struct hdac_device *hdac, int pcm_idx)
 	return true;
 }
 
-static int hdac_hdmi_get_spk_alloc(struct hdac_device *hdac, int pcm_idx)
+static int hdac_hdmi_get_spk_alloc(struct hdac_device *hdev, int pcm_idx)
 {
-	struct hdac_ext_device *edev = to_ehdac_device(hdac);
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
 	struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
 	struct hdac_hdmi_port *port;
 
@@ -1983,30 +1976,30 @@ static struct hdac_hdmi_drv_data intel_drv_data  = {
 
 static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
 {
-	struct hdac_device *codec = &edev->hdac;
+	struct hdac_device *hdev = &edev->hdev;
 	struct hdac_hdmi_priv *hdmi_priv;
 	struct snd_soc_dai_driver *hdmi_dais = NULL;
 	struct hdac_ext_link *hlink = NULL;
 	int num_dais = 0;
 	int ret = 0;
-	struct hdac_driver *hdrv = drv_to_hdac_driver(codec->dev.driver);
-	const struct hda_device_id *hdac_id = hdac_get_device_id(codec, hdrv);
+	struct hdac_driver *hdrv = drv_to_hdac_driver(hdev->dev.driver);
+	const struct hda_device_id *hdac_id = hdac_get_device_id(hdev, hdrv);
 
 	/* hold the ref while we probe */
-	hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+	hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdev.dev));
 	if (!hlink) {
-		dev_err(&edev->hdac.dev, "hdac link not found\n");
+		dev_err(&edev->hdev.dev, "hdac link not found\n");
 		return -EIO;
 	}
 
 	snd_hdac_ext_bus_link_get(edev->ebus, hlink);
 
-	hdmi_priv = devm_kzalloc(&codec->dev, sizeof(*hdmi_priv), GFP_KERNEL);
+	hdmi_priv = devm_kzalloc(&hdev->dev, sizeof(*hdmi_priv), GFP_KERNEL);
 	if (hdmi_priv == NULL)
 		return -ENOMEM;
 
 	edev->private_data = hdmi_priv;
-	snd_hdac_register_chmap_ops(codec, &hdmi_priv->chmap);
+	snd_hdac_register_chmap_ops(hdev, &hdmi_priv->chmap);
 	hdmi_priv->chmap.ops.get_chmap = hdac_hdmi_get_chmap;
 	hdmi_priv->chmap.ops.set_chmap = hdac_hdmi_set_chmap;
 	hdmi_priv->chmap.ops.is_pcm_attached = is_hdac_hdmi_pcm_attached;
@@ -2021,7 +2014,7 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
 	else
 		hdmi_priv->drv_data = &intel_drv_data;
 
-	dev_set_drvdata(&codec->dev, edev);
+	dev_set_drvdata(&hdev->dev, edev);
 
 	INIT_LIST_HEAD(&hdmi_priv->pin_list);
 	INIT_LIST_HEAD(&hdmi_priv->cvt_list);
@@ -2032,9 +2025,9 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
 	 * Turned off in the runtime_suspend during the first explicit
 	 * pm_runtime_suspend call.
 	 */
-	ret = snd_hdac_display_power(edev->hdac.bus, true);
+	ret = snd_hdac_display_power(edev->hdev.bus, true);
 	if (ret < 0) {
-		dev_err(&edev->hdac.dev,
+		dev_err(&edev->hdev.dev,
 			"Cannot turn on display power on i915 err: %d\n",
 			ret);
 		return ret;
@@ -2042,13 +2035,14 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
 
 	ret = hdac_hdmi_parse_and_map_nid(edev, &hdmi_dais, &num_dais);
 	if (ret < 0) {
-		dev_err(&codec->dev,
+		dev_err(&hdev->dev,
 			"Failed in parse and map nid with err: %d\n", ret);
 		return ret;
 	}
+	snd_hdac_refresh_widgets(hdev, true);
 
 	/* ASoC specific initialization */
-	ret = snd_soc_register_codec(&codec->dev, &hdmi_hda_codec,
+	ret = snd_soc_register_codec(&hdev->dev, &hdmi_hda_codec,
 					hdmi_dais, num_dais);
 
 	snd_hdac_ext_bus_link_put(edev->ebus, hlink);
@@ -2058,14 +2052,14 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
 
 static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev)
 {
-	struct hdac_hdmi_priv *hdmi = edev->private_data;
+	struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
 	struct hdac_hdmi_pin *pin, *pin_next;
 	struct hdac_hdmi_cvt *cvt, *cvt_next;
 	struct hdac_hdmi_pcm *pcm, *pcm_next;
 	struct hdac_hdmi_port *port, *port_next;
 	int i;
 
-	snd_soc_unregister_codec(&edev->hdac.dev);
+	snd_soc_unregister_codec(&edev->hdev.dev);
 
 	list_for_each_entry_safe(pcm, pcm_next, &hdmi->pcm_list, head) {
 		pcm->cvt = NULL;
@@ -2101,8 +2095,8 @@ static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev)
 static int hdac_hdmi_runtime_suspend(struct device *dev)
 {
 	struct hdac_ext_device *edev = to_hda_ext_device(dev);
-	struct hdac_device *hdac = &edev->hdac;
-	struct hdac_bus *bus = hdac->bus;
+	struct hdac_device *hdev = &edev->hdev;
+	struct hdac_bus *bus = hdev->bus;
 	struct hdac_ext_bus *ebus = hbus_to_ebus(bus);
 	struct hdac_ext_link *hlink = NULL;
 	int err;
@@ -2120,7 +2114,7 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
 	 * is received. So setting power state is ensured without using loop
 	 * to read the state.
 	 */
-	snd_hdac_codec_read(hdac, hdac->afg, 0,	AC_VERB_SET_POWER_STATE,
+	snd_hdac_codec_read(hdev, hdev->afg, 0,	AC_VERB_SET_POWER_STATE,
 							AC_PWRST_D3);
 	err = snd_hdac_display_power(bus, false);
 	if (err < 0) {
@@ -2142,8 +2136,8 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
 static int hdac_hdmi_runtime_resume(struct device *dev)
 {
 	struct hdac_ext_device *edev = to_hda_ext_device(dev);
-	struct hdac_device *hdac = &edev->hdac;
-	struct hdac_bus *bus = hdac->bus;
+	struct hdac_device *hdev = &edev->hdev;
+	struct hdac_bus *bus = hdev->bus;
 	struct hdac_ext_bus *ebus = hbus_to_ebus(bus);
 	struct hdac_ext_link *hlink = NULL;
 	int err;
@@ -2168,11 +2162,11 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
 		return err;
 	}
 
-	hdac_hdmi_skl_enable_all_pins(&edev->hdac);
-	hdac_hdmi_skl_enable_dp12(&edev->hdac);
+	hdac_hdmi_skl_enable_all_pins(&edev->hdev);
+	hdac_hdmi_skl_enable_dp12(&edev->hdev);
 
 	/* Power up afg */
-	snd_hdac_codec_read(hdac, hdac->afg, 0,	AC_VERB_SET_POWER_STATE,
+	snd_hdac_codec_read(hdev, hdev->afg, 0,	AC_VERB_SET_POWER_STATE,
 							AC_PWRST_D0);
 
 	return 0;
@@ -2192,6 +2186,8 @@ static const struct hda_device_id hdmi_list[] = {
 	HDA_CODEC_EXT_ENTRY(0x80862809, 0x100000, "Skylake HDMI", 0),
 	HDA_CODEC_EXT_ENTRY(0x8086280a, 0x100000, "Broxton HDMI", 0),
 	HDA_CODEC_EXT_ENTRY(0x8086280b, 0x100000, "Kabylake HDMI", 0),
+	HDA_CODEC_EXT_ENTRY(0x8086280c, 0x100000, "Cannonlake HDMI",
+						   &intel_glk_drv_data),
 	HDA_CODEC_EXT_ENTRY(0x8086280d, 0x100000, "Geminilake HDMI",
 						   &intel_glk_drv_data),
 	{}
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
new file mode 100644
index 0000000000000000000000000000000000000000..9af0d985d6e915999d9c56f88a1bd1be7db34d5c
--- /dev/null
+++ b/sound/soc/codecs/max98373.c
@@ -0,0 +1,971 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017, Maxim Integrated */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <sound/tlv.h>
+#include "max98373.h"
+
+static const struct reg_default max98373_reg[] = {
+	{MAX98373_R2000_SW_RESET, 0x00},
+	{MAX98373_R2001_INT_RAW1, 0x00},
+	{MAX98373_R2002_INT_RAW2, 0x00},
+	{MAX98373_R2003_INT_RAW3, 0x00},
+	{MAX98373_R2004_INT_STATE1, 0x00},
+	{MAX98373_R2005_INT_STATE2, 0x00},
+	{MAX98373_R2006_INT_STATE3, 0x00},
+	{MAX98373_R2007_INT_FLAG1, 0x00},
+	{MAX98373_R2008_INT_FLAG2, 0x00},
+	{MAX98373_R2009_INT_FLAG3, 0x00},
+	{MAX98373_R200A_INT_EN1, 0x00},
+	{MAX98373_R200B_INT_EN2, 0x00},
+	{MAX98373_R200C_INT_EN3, 0x00},
+	{MAX98373_R200D_INT_FLAG_CLR1, 0x00},
+	{MAX98373_R200E_INT_FLAG_CLR2, 0x00},
+	{MAX98373_R200F_INT_FLAG_CLR3, 0x00},
+	{MAX98373_R2010_IRQ_CTRL, 0x00},
+	{MAX98373_R2014_THERM_WARN_THRESH, 0x10},
+	{MAX98373_R2015_THERM_SHDN_THRESH, 0x27},
+	{MAX98373_R2016_THERM_HYSTERESIS, 0x01},
+	{MAX98373_R2017_THERM_FOLDBACK_SET, 0xC0},
+	{MAX98373_R2018_THERM_FOLDBACK_EN, 0x00},
+	{MAX98373_R201E_PIN_DRIVE_STRENGTH, 0x55},
+	{MAX98373_R2020_PCM_TX_HIZ_EN_1, 0xFE},
+	{MAX98373_R2021_PCM_TX_HIZ_EN_2, 0xFF},
+	{MAX98373_R2022_PCM_TX_SRC_1, 0x00},
+	{MAX98373_R2023_PCM_TX_SRC_2, 0x00},
+	{MAX98373_R2024_PCM_DATA_FMT_CFG, 0xC0},
+	{MAX98373_R2025_AUDIO_IF_MODE, 0x00},
+	{MAX98373_R2026_PCM_CLOCK_RATIO, 0x04},
+	{MAX98373_R2027_PCM_SR_SETUP_1, 0x08},
+	{MAX98373_R2028_PCM_SR_SETUP_2, 0x88},
+	{MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1, 0x00},
+	{MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2, 0x00},
+	{MAX98373_R202B_PCM_RX_EN, 0x00},
+	{MAX98373_R202C_PCM_TX_EN, 0x00},
+	{MAX98373_R202E_ICC_RX_CH_EN_1, 0x00},
+	{MAX98373_R202F_ICC_RX_CH_EN_2, 0x00},
+	{MAX98373_R2030_ICC_TX_HIZ_EN_1, 0xFF},
+	{MAX98373_R2031_ICC_TX_HIZ_EN_2, 0xFF},
+	{MAX98373_R2032_ICC_LINK_EN_CFG, 0x30},
+	{MAX98373_R2034_ICC_TX_CNTL, 0x00},
+	{MAX98373_R2035_ICC_TX_EN, 0x00},
+	{MAX98373_R2036_SOUNDWIRE_CTRL, 0x05},
+	{MAX98373_R203D_AMP_DIG_VOL_CTRL, 0x00},
+	{MAX98373_R203E_AMP_PATH_GAIN, 0x08},
+	{MAX98373_R203F_AMP_DSP_CFG, 0x02},
+	{MAX98373_R2040_TONE_GEN_CFG, 0x00},
+	{MAX98373_R2041_AMP_CFG, 0x03},
+	{MAX98373_R2042_AMP_EDGE_RATE_CFG, 0x00},
+	{MAX98373_R2043_AMP_EN, 0x00},
+	{MAX98373_R2046_IV_SENSE_ADC_DSP_CFG, 0x04},
+	{MAX98373_R2047_IV_SENSE_ADC_EN, 0x00},
+	{MAX98373_R2051_MEAS_ADC_SAMPLING_RATE, 0x00},
+	{MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG, 0x00},
+	{MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG, 0x00},
+	{MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK, 0x00},
+	{MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK, 0x00},
+	{MAX98373_R2056_MEAS_ADC_PVDD_CH_EN, 0x00},
+	{MAX98373_R2090_BDE_LVL_HOLD, 0x00},
+	{MAX98373_R2091_BDE_GAIN_ATK_REL_RATE, 0x00},
+	{MAX98373_R2092_BDE_CLIPPER_MODE, 0x00},
+	{MAX98373_R2097_BDE_L1_THRESH, 0x00},
+	{MAX98373_R2098_BDE_L2_THRESH, 0x00},
+	{MAX98373_R2099_BDE_L3_THRESH, 0x00},
+	{MAX98373_R209A_BDE_L4_THRESH, 0x00},
+	{MAX98373_R209B_BDE_THRESH_HYST, 0x00},
+	{MAX98373_R20A8_BDE_L1_CFG_1, 0x00},
+	{MAX98373_R20A9_BDE_L1_CFG_2, 0x00},
+	{MAX98373_R20AA_BDE_L1_CFG_3, 0x00},
+	{MAX98373_R20AB_BDE_L2_CFG_1, 0x00},
+	{MAX98373_R20AC_BDE_L2_CFG_2, 0x00},
+	{MAX98373_R20AD_BDE_L2_CFG_3, 0x00},
+	{MAX98373_R20AE_BDE_L3_CFG_1, 0x00},
+	{MAX98373_R20AF_BDE_L3_CFG_2, 0x00},
+	{MAX98373_R20B0_BDE_L3_CFG_3, 0x00},
+	{MAX98373_R20B1_BDE_L4_CFG_1, 0x00},
+	{MAX98373_R20B2_BDE_L4_CFG_2, 0x00},
+	{MAX98373_R20B3_BDE_L4_CFG_3, 0x00},
+	{MAX98373_R20B4_BDE_INFINITE_HOLD_RELEASE, 0x00},
+	{MAX98373_R20B5_BDE_EN, 0x00},
+	{MAX98373_R20B6_BDE_CUR_STATE_READBACK, 0x00},
+	{MAX98373_R20D1_DHT_CFG, 0x01},
+	{MAX98373_R20D2_DHT_ATTACK_CFG, 0x02},
+	{MAX98373_R20D3_DHT_RELEASE_CFG, 0x03},
+	{MAX98373_R20D4_DHT_EN, 0x00},
+	{MAX98373_R20E0_LIMITER_THRESH_CFG, 0x00},
+	{MAX98373_R20E1_LIMITER_ATK_REL_RATES, 0x00},
+	{MAX98373_R20E2_LIMITER_EN, 0x00},
+	{MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG, 0x00},
+	{MAX98373_R20FF_GLOBAL_SHDN, 0x00},
+	{MAX98373_R21FF_REV_ID, 0x42},
+};
+
+static int max98373_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+	unsigned int format = 0;
+	unsigned int invert = 0;
+
+	dev_dbg(codec->dev, "%s: fmt 0x%08X\n", __func__, fmt);
+
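+	/* DAI clock inversion */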
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	case SND_SOC_DAIFMT_IB_NF:
+		invert = MAX98373_PCM_MODE_CFG_PCM_BCLKEDGE;
+		break;
+	default:
+		dev_err(codec->dev, "DAI invert mode unsupported\n");
+		return -EINVAL;
+	}
+
+	regmap_update_bits(max98373->regmap,
+		MAX98373_R2026_PCM_CLOCK_RATIO,
+		MAX98373_PCM_MODE_CFG_PCM_BCLKEDGE,
+		invert);
+
+	/* interface format */
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		format = MAX98373_PCM_FORMAT_I2S;
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		format = MAX98373_PCM_FORMAT_LJ;
+		break;
+	case SND_SOC_DAIFMT_DSP_A:
+		format = MAX98373_PCM_FORMAT_TDM_MODE1;
+		break;
+	case SND_SOC_DAIFMT_DSP_B:
+		format = MAX98373_PCM_FORMAT_TDM_MODE0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	regmap_update_bits(max98373->regmap,
+		MAX98373_R2024_PCM_DATA_FMT_CFG,
+		MAX98373_PCM_MODE_CFG_FORMAT_MASK,
+		format << MAX98373_PCM_MODE_CFG_FORMAT_SHIFT);
+
+	return 0;
+}
+
+/* BCLKs per LRCLK */
+static const int bclk_sel_table[] = {
+	32, 48, 64, 96, 128, 192, 256, 384, 512, 320,
+};
+
+static int max98373_get_bclk_sel(int bclk)
+{
+	int i;
+	/* match BCLKs per LRCLK */
+	for (i = 0; i < ARRAY_SIZE(bclk_sel_table); i++) {
+		if (bclk_sel_table[i] == bclk)
+			return i + 2;
+	}
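+	/* 0 means the ratio is not supported */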
+	return 0;
+}
+
+static int max98373_set_clock(struct snd_soc_codec *codec,
+	struct snd_pcm_hw_params *params)
+{
+	struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+	/* BCLK/LRCLK ratio calculation */
+	int blr_clk_ratio = params_channels(params) * max98373->ch_size;
+	int value;
+
+	if (!max98373->tdm_mode) {
+		/* BCLK configuration */
+		value = max98373_get_bclk_sel(blr_clk_ratio);
+		if (!value) {
+			dev_err(codec->dev, "format unsupported %d\n",
+				params_format(params));
+			return -EINVAL;
+		}
+
+		regmap_update_bits(max98373->regmap,
+			MAX98373_R2026_PCM_CLOCK_RATIO,
+			MAX98373_PCM_CLK_SETUP_BSEL_MASK,
+			value);
+	}
+	return 0;
+}
+
+static int max98373_dai_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params,
+	struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+	unsigned int sampling_rate = 0;
+	unsigned int chan_sz = 0;
+
+	/* pcm mode configuration */
+	switch (snd_pcm_format_width(params_format(params))) {
+	case 16:
+		chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_16;
+		break;
+	case 24:
+		chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_24;
+		break;
+	case 32:
+		chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_32;
+		break;
+	default:
+		dev_err(codec->dev, "format unsupported %d\n",
+			params_format(params));
+		goto err;
+	}
+
+	max98373->ch_size = snd_pcm_format_width(params_format(params));
+
+	regmap_update_bits(max98373->regmap,
+		MAX98373_R2024_PCM_DATA_FMT_CFG,
+		MAX98373_PCM_MODE_CFG_CHANSZ_MASK, chan_sz);
+
+	dev_dbg(codec->dev, "format supported %d",
+		params_format(params));
+
+	/* sampling rate configuration */
+	switch (params_rate(params)) {
+	case 8000:
+		sampling_rate = MAX98373_PCM_SR_SET1_SR_8000;
+		break;
+	case 11025:
+		sampling_rate = MAX98373_PCM_SR_SET1_SR_11025;
+		break;
+	case 12000:
+		sampling_rate = MAX98373_PCM_SR_SET1_SR_12000;
+		break;
+	case 16000:
+		sampling_rate = MAX98373_PCM_SR_SET1_SR_16000;
+		break;
+	case 22050:
+		sampling_rate = MAX98373_PCM_SR_SET1_SR_22050;
+		break;
+	case 24000:
+		sampling_rate = MAX98373_PCM_SR_SET1_SR_24000;
+		break;
+	case 32000:
+		sampling_rate = MAX98373_PCM_SR_SET1_SR_32000;
+		break;
+	case 44100:
+		sampling_rate = MAX98373_PCM_SR_SET1_SR_44100;
+		break;
+	case 48000:
+		sampling_rate = MAX98373_PCM_SR_SET1_SR_48000;
+		break;
+	default:
+		dev_err(codec->dev, "rate %d not supported\n",
+			params_rate(params));
+		goto err;
+	}
+	/* set DAI_SR to correct LRCLK frequency */
+	regmap_update_bits(max98373->regmap,
+		MAX98373_R2027_PCM_SR_SETUP_1,
+		MAX98373_PCM_SR_SET1_SR_MASK,
+		sampling_rate);
+	regmap_update_bits(max98373->regmap,
+		MAX98373_R2028_PCM_SR_SETUP_2,
+		MAX98373_PCM_SR_SET2_SR_MASK,
+		sampling_rate << MAX98373_PCM_SR_SET2_SR_SHIFT);
+
+	/* set IV ADC sampling rate; halved in interleave mode above 16 kHz */
+	if (max98373->interleave_mode &&
+	    sampling_rate > MAX98373_PCM_SR_SET1_SR_16000)
+		regmap_update_bits(max98373->regmap,
+			MAX98373_R2028_PCM_SR_SETUP_2,
+			MAX98373_PCM_SR_SET2_IVADC_SR_MASK,
+			sampling_rate - 3);
+	else
+		regmap_update_bits(max98373->regmap,
+			MAX98373_R2028_PCM_SR_SETUP_2,
+			MAX98373_PCM_SR_SET2_IVADC_SR_MASK,
+			sampling_rate);
+
+	return max98373_set_clock(codec, params);
+err:
+	return -EINVAL;
+}
+
+static int max98373_dai_tdm_slot(struct snd_soc_dai *dai,
+	unsigned int tx_mask, unsigned int rx_mask,
+	int slots, int slot_width)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+	int bsel = 0;
+	unsigned int chan_sz = 0;
+	unsigned int mask;
+	int x, slot_found;
+
+	max98373->tdm_mode = true;
+
+	/* BCLK configuration */
+	bsel = max98373_get_bclk_sel(slots * slot_width);
+	if (bsel == 0) {
+		dev_err(codec->dev, "BCLK %d not supported\n",
+			slots * slot_width);
+		return -EINVAL;
+	}
+
+	regmap_update_bits(max98373->regmap,
+		MAX98373_R2026_PCM_CLOCK_RATIO,
+		MAX98373_PCM_CLK_SETUP_BSEL_MASK,
+		bsel);
+
+	/* Channel size configuration */
+	switch (slot_width) {
+	case 16:
+		chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_16;
+		break;
+	case 24:
+		chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_24;
+		break;
+	case 32:
+		chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_32;
+		break;
+	default:
+		dev_err(codec->dev, "format unsupported %d\n",
+			slot_width);
+		return -EINVAL;
+	}
+
+	regmap_update_bits(max98373->regmap,
+		MAX98373_R2024_PCM_DATA_FMT_CFG,
+		MAX98373_PCM_MODE_CFG_CHANSZ_MASK, chan_sz);
+
+	/* Rx slot configuration: first two slots in rx_mask feed CH0/CH1 */
+	slot_found = 0;
+	mask = rx_mask;
+	for (x = 0 ; x < 16 ; x++, mask >>= 1) {
+		if (mask & 0x1) {
+			if (slot_found == 0)
+				regmap_update_bits(max98373->regmap,
+					MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1,
+					MAX98373_PCM_TO_SPK_CH0_SRC_MASK, x);
+			else
+				regmap_write(max98373->regmap,
+					MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2,
+					x);
+			slot_found++;
+			if (slot_found > 1)
+				break;
+		}
+	}
+
+	/* Tx slot Hi-Z configuration: tri-state slots not set in tx_mask */
+	regmap_write(max98373->regmap,
+		MAX98373_R2020_PCM_TX_HIZ_EN_1,
+		~tx_mask & 0xFF);
+	regmap_write(max98373->regmap,
+		MAX98373_R2021_PCM_TX_HIZ_EN_2,
+		(~tx_mask & 0xFF00) >> 8);
+
+	return 0;
+}
+
+#define MAX98373_RATES SNDRV_PCM_RATE_8000_96000
+
+#define MAX98373_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+	SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static const struct snd_soc_dai_ops max98373_dai_ops = {
+	.set_fmt = max98373_dai_set_fmt,
+	.hw_params = max98373_dai_hw_params,
+	.set_tdm_slot = max98373_dai_tdm_slot,
+};
+
+static int max98373_dac_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+
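+	/* the amp leaves global shutdown only while this widget is on */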
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		regmap_update_bits(max98373->regmap,
+			MAX98373_R20FF_GLOBAL_SHDN,
+			MAX98373_GLOBAL_EN_MASK, 1);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		regmap_update_bits(max98373->regmap,
+			MAX98373_R20FF_GLOBAL_SHDN,
+			MAX98373_GLOBAL_EN_MASK, 0);
+		max98373->tdm_mode = 0;
+		break;
+	default:
+		return 0;
+	}
+	return 0;
+}
+
+static const char * const max98373_switch_text[] = {
+	"Left", "Right", "LeftRight"};
+
+static const struct soc_enum dai_sel_enum =
+	SOC_ENUM_SINGLE(MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1,
+		MAX98373_PCM_TO_SPK_MONOMIX_CFG_SHIFT,
+		3, max98373_switch_text);
+
+static const struct snd_kcontrol_new max98373_dai_controls =
+	SOC_DAPM_ENUM("DAI Sel", dai_sel_enum);
+
+static const struct snd_kcontrol_new max98373_vi_control =
+	SOC_DAPM_SINGLE("Switch", MAX98373_R202C_PCM_TX_EN, 0, 1, 0);
+
+static const struct snd_kcontrol_new max98373_spkfb_control =
+	SOC_DAPM_SINGLE("Switch", MAX98373_R2043_AMP_EN, 1, 1, 0);
+
+static const struct snd_soc_dapm_widget max98373_dapm_widgets[] = {
+SND_SOC_DAPM_DAC_E("Amp Enable", "HiFi Playback",
+	MAX98373_R202B_PCM_RX_EN, 0, 0, max98373_dac_event,
+	SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_MUX("DAI Sel Mux", SND_SOC_NOPM, 0, 0,
+	&max98373_dai_controls),
+SND_SOC_DAPM_OUTPUT("BE_OUT"),
+SND_SOC_DAPM_AIF_OUT("Voltage Sense", "HiFi Capture", 0,
+	MAX98373_R2047_IV_SENSE_ADC_EN, 0, 0),
+SND_SOC_DAPM_AIF_OUT("Current Sense", "HiFi Capture", 0,
+	MAX98373_R2047_IV_SENSE_ADC_EN, 1, 0),
+SND_SOC_DAPM_AIF_OUT("Speaker FB Sense", "HiFi Capture", 0,
+	SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_SWITCH("VI Sense", SND_SOC_NOPM, 0, 0,
+	&max98373_vi_control),
+SND_SOC_DAPM_SWITCH("SpkFB Sense", SND_SOC_NOPM, 0, 0,
+	&max98373_spkfb_control),
+SND_SOC_DAPM_SIGGEN("VMON"),
+SND_SOC_DAPM_SIGGEN("IMON"),
+SND_SOC_DAPM_SIGGEN("FBMON"),
+};
+
+static DECLARE_TLV_DB_SCALE(max98373_digital_tlv, 0, -50, 0);
+static const DECLARE_TLV_DB_RANGE(max98373_spk_tlv,
+	0, 8, TLV_DB_SCALE_ITEM(0, 50, 0),
+	9, 10, TLV_DB_SCALE_ITEM(500, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_spkgain_max_tlv,
+	0, 9, TLV_DB_SCALE_ITEM(800, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_dht_step_size_tlv,
+	0, 1, TLV_DB_SCALE_ITEM(25, 25, 0),
+	2, 4, TLV_DB_SCALE_ITEM(100, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_dht_spkgain_min_tlv,
+	0, 9, TLV_DB_SCALE_ITEM(800, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_dht_rotation_point_tlv,
+	0, 1, TLV_DB_SCALE_ITEM(-50, -50, 0),
+	2, 7, TLV_DB_SCALE_ITEM(-200, -100, 0),
+	8, 9, TLV_DB_SCALE_ITEM(-1000, -200, 0),
+	10, 11, TLV_DB_SCALE_ITEM(-1500, -300, 0),
+	12, 13, TLV_DB_SCALE_ITEM(-2000, -200, 0),
+	14, 15, TLV_DB_SCALE_ITEM(-2500, -500, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_limiter_thresh_tlv,
+	0, 15, TLV_DB_SCALE_ITEM(0, -100, 0),
+);
+
+static const DECLARE_TLV_DB_RANGE(max98373_bde_gain_tlv,
+	0, 60, TLV_DB_SCALE_ITEM(0, -25, 0),
+);
+
+static bool max98373_readable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case MAX98373_R2001_INT_RAW1 ... MAX98373_R200C_INT_EN3:
+	case MAX98373_R2010_IRQ_CTRL:
+	case MAX98373_R2014_THERM_WARN_THRESH
+		... MAX98373_R2018_THERM_FOLDBACK_EN:
+	case MAX98373_R201E_PIN_DRIVE_STRENGTH
+		... MAX98373_R2036_SOUNDWIRE_CTRL:
+	case MAX98373_R203D_AMP_DIG_VOL_CTRL ... MAX98373_R2043_AMP_EN:
+	case MAX98373_R2046_IV_SENSE_ADC_DSP_CFG
+		... MAX98373_R2047_IV_SENSE_ADC_EN:
+	case MAX98373_R2051_MEAS_ADC_SAMPLING_RATE
+		... MAX98373_R2056_MEAS_ADC_PVDD_CH_EN:
+	case MAX98373_R2090_BDE_LVL_HOLD ... MAX98373_R2092_BDE_CLIPPER_MODE:
+	case MAX98373_R2097_BDE_L1_THRESH
+		... MAX98373_R209B_BDE_THRESH_HYST:
+	case MAX98373_R20A8_BDE_L1_CFG_1 ... MAX98373_R20B3_BDE_L4_CFG_3:
+	case MAX98373_R20B5_BDE_EN ... MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+	case MAX98373_R20D1_DHT_CFG ... MAX98373_R20D4_DHT_EN:
+	case MAX98373_R20E0_LIMITER_THRESH_CFG ... MAX98373_R20E2_LIMITER_EN:
+	case MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG
+		... MAX98373_R20FF_GLOBAL_SHDN:
+	case MAX98373_R21FF_REV_ID:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case MAX98373_R2000_SW_RESET ... MAX98373_R2009_INT_FLAG3:
+	case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
+	case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
+	case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+	case MAX98373_R21FF_REV_ID:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static const char * const max98373_output_voltage_lvl_text[] = {
+	"5.43V", "6.09V", "6.83V", "7.67V", "8.60V",
+	"9.65V", "10.83V", "12.15V", "13.63V", "15.29V"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_out_volt_enum,
+			    MAX98373_R203E_AMP_PATH_GAIN, 0,
+			    max98373_output_voltage_lvl_text);
+
+static const char * const max98373_dht_attack_rate_text[] = {
+	"17.5us", "35us", "70us", "140us",
+	"280us", "560us", "1120us", "2240us"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_dht_attack_rate_enum,
+			    MAX98373_R20D2_DHT_ATTACK_CFG, 0,
+			    max98373_dht_attack_rate_text);
+
+static const char * const max98373_dht_release_rate_text[] = {
+	"45ms", "225ms", "450ms", "1150ms",
+	"2250ms", "3100ms", "4500ms", "6750ms"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_dht_release_rate_enum,
+			    MAX98373_R20D3_DHT_RELEASE_CFG, 0,
+			    max98373_dht_release_rate_text);
+
+static const char * const max98373_limiter_attack_rate_text[] = {
+	"10us", "20us", "40us", "80us",
+	"160us", "320us", "640us", "1.28ms",
+	"2.56ms", "5.12ms", "10.24ms", "20.48ms",
+	"40.96ms", "81.92ms", "16.384ms", "32.768ms"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_limiter_attack_rate_enum,
+			    MAX98373_R20E1_LIMITER_ATK_REL_RATES, 4,
+			    max98373_limiter_attack_rate_text);
+
+static const char * const max98373_limiter_release_rate_text[] = {
+	"40us", "80us", "160us", "320us",
+	"640us", "1.28ms", "2.56ms", "5.120ms",
+	"10.24ms", "20.48ms", "40.96ms", "81.92ms",
+	"163.84ms", "327.68ms", "655.36ms", "1310.72ms"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_limiter_release_rate_enum,
+			    MAX98373_R20E1_LIMITER_ATK_REL_RATES, 0,
+			    max98373_limiter_release_rate_text);
+
+static const char * const max98373_ADC_samplerate_text[] = {
+	"333kHz", "192kHz", "64kHz", "48kHz"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_adc_samplerate_enum,
+			    MAX98373_R2051_MEAS_ADC_SAMPLING_RATE, 0,
+			    max98373_ADC_samplerate_text);
+
+static const struct snd_kcontrol_new max98373_snd_controls[] = {
+SOC_SINGLE("Digital Vol Sel Switch", MAX98373_R203F_AMP_DSP_CFG,
+	MAX98373_AMP_VOL_SEL_SHIFT, 1, 0),
+SOC_SINGLE("Volume Location Switch", MAX98373_R203F_AMP_DSP_CFG,
+	MAX98373_AMP_VOL_SEL_SHIFT, 1, 0),
+SOC_SINGLE("Ramp Up Switch", MAX98373_R203F_AMP_DSP_CFG,
+	MAX98373_AMP_DSP_CFG_RMP_UP_SHIFT, 1, 0),
+SOC_SINGLE("Ramp Down Switch", MAX98373_R203F_AMP_DSP_CFG,
+	MAX98373_AMP_DSP_CFG_RMP_DN_SHIFT, 1, 0),
+SOC_SINGLE("CLK Monitor Switch", MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG,
+	MAX98373_CLOCK_MON_SHIFT, 1, 0),
+SOC_SINGLE("Dither Switch", MAX98373_R203F_AMP_DSP_CFG,
+	MAX98373_AMP_DSP_CFG_DITH_SHIFT, 1, 0),
+SOC_SINGLE("DC Blocker Switch", MAX98373_R203F_AMP_DSP_CFG,
+	MAX98373_AMP_DSP_CFG_DCBLK_SHIFT, 1, 0),
+SOC_SINGLE_TLV("Digital Volume", MAX98373_R203D_AMP_DIG_VOL_CTRL,
+	0, 0x7F, 0, max98373_digital_tlv),
+SOC_SINGLE_TLV("Speaker Volume", MAX98373_R203E_AMP_PATH_GAIN,
+	MAX98373_SPK_DIGI_GAIN_SHIFT, 10, 0, max98373_spk_tlv),
+SOC_SINGLE_TLV("FS Max Volume", MAX98373_R203E_AMP_PATH_GAIN,
+	MAX98373_FS_GAIN_MAX_SHIFT, 9, 0, max98373_spkgain_max_tlv),
+SOC_ENUM("Output Voltage", max98373_out_volt_enum),
+/* Dynamic Headroom Tracking */
+SOC_SINGLE("DHT Switch", MAX98373_R20D4_DHT_EN,
+	MAX98373_DHT_EN_SHIFT, 1, 0),
+SOC_SINGLE_TLV("DHT Gain Min", MAX98373_R20D1_DHT_CFG,
+	MAX98373_DHT_SPK_GAIN_MIN_SHIFT, 9, 0, max98373_dht_spkgain_min_tlv),
+SOC_SINGLE_TLV("DHT Rot Pnt", MAX98373_R20D1_DHT_CFG,
+	MAX98373_DHT_ROT_PNT_SHIFT, 15, 0, max98373_dht_rotation_point_tlv),
+SOC_SINGLE_TLV("DHT Attack Step", MAX98373_R20D2_DHT_ATTACK_CFG,
+	MAX98373_DHT_ATTACK_STEP_SHIFT, 4, 0, max98373_dht_step_size_tlv),
+SOC_SINGLE_TLV("DHT Release Step", MAX98373_R20D3_DHT_RELEASE_CFG,
+	MAX98373_DHT_RELEASE_STEP_SHIFT, 4, 0, max98373_dht_step_size_tlv),
+SOC_ENUM("DHT Attack Rate", max98373_dht_attack_rate_enum),
+SOC_ENUM("DHT Release Rate", max98373_dht_release_rate_enum),
+/* ADC configuration */
+SOC_SINGLE("ADC PVDD CH Switch", MAX98373_R2056_MEAS_ADC_PVDD_CH_EN, 0, 1, 0),
+SOC_SINGLE("ADC PVDD FLT Switch", MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG,
+	MAX98373_FLT_EN_SHIFT, 1, 0),
+SOC_SINGLE("ADC TEMP FLT Switch", MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG,
+	MAX98373_FLT_EN_SHIFT, 1, 0),
+SOC_SINGLE("ADC PVDD", MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK, 0, 0xFF, 0),
+SOC_SINGLE("ADC TEMP", MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK, 0, 0xFF, 0),
+SOC_SINGLE("ADC PVDD FLT Coeff", MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG,
+	0, 0x3, 0),
+SOC_SINGLE("ADC TEMP FLT Coeff", MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG,
+	0, 0x3, 0),
+SOC_ENUM("ADC SampleRate", max98373_adc_samplerate_enum),
+/* Brownout Detection Engine */
+SOC_SINGLE("BDE Switch", MAX98373_R20B5_BDE_EN, MAX98373_BDE_EN_SHIFT, 1, 0),
+SOC_SINGLE("BDE LVL4 Mute Switch", MAX98373_R20B2_BDE_L4_CFG_2,
+	MAX98373_LVL4_MUTE_EN_SHIFT, 1, 0),
+SOC_SINGLE("BDE LVL4 Hold Switch", MAX98373_R20B2_BDE_L4_CFG_2,
+	MAX98373_LVL4_HOLD_EN_SHIFT, 1, 0),
+SOC_SINGLE("BDE LVL1 Thresh", MAX98373_R2097_BDE_L1_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE LVL2 Thresh", MAX98373_R2098_BDE_L2_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE LVL3 Thresh", MAX98373_R2099_BDE_L3_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE LVL4 Thresh", MAX98373_R209A_BDE_L4_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE Active Level", MAX98373_R20B6_BDE_CUR_STATE_READBACK, 0, 8, 0),
+SOC_SINGLE("BDE Clip Mode Switch", MAX98373_R2092_BDE_CLIPPER_MODE, 0, 1, 0),
+SOC_SINGLE("BDE Thresh Hysteresis", MAX98373_R209B_BDE_THRESH_HYST, 0, 0xFF, 0),
+SOC_SINGLE("BDE Hold Time", MAX98373_R2090_BDE_LVL_HOLD, 0, 0xFF, 0),
+SOC_SINGLE("BDE Attack Rate", MAX98373_R2091_BDE_GAIN_ATK_REL_RATE, 4, 0xF, 0),
+SOC_SINGLE("BDE Release Rate", MAX98373_R2091_BDE_GAIN_ATK_REL_RATE, 0, 0xF, 0),
+SOC_SINGLE_TLV("BDE LVL1 Clip Thresh", MAX98373_R20A9_BDE_L1_CFG_2,
+	0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL2 Clip Thresh", MAX98373_R20AC_BDE_L2_CFG_2,
+	0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL3 Clip Thresh", MAX98373_R20AF_BDE_L3_CFG_2,
+	0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL4 Clip Thresh", MAX98373_R20B2_BDE_L4_CFG_2,
+	0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL1 Clip Gain Reduct", MAX98373_R20AA_BDE_L1_CFG_3,
+	0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL2 Clip Gain Reduct", MAX98373_R20AD_BDE_L2_CFG_3,
+	0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL3 Clip Gain Reduct", MAX98373_R20B0_BDE_L3_CFG_3,
+	0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL4 Clip Gain Reduct", MAX98373_R20B3_BDE_L4_CFG_3,
+	0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL1 Limiter Thresh", MAX98373_R20A8_BDE_L1_CFG_1,
+	0, 0xF, 0, max98373_limiter_thresh_tlv),
+SOC_SINGLE_TLV("BDE LVL2 Limiter Thresh", MAX98373_R20AB_BDE_L2_CFG_1,
+	0, 0xF, 0, max98373_limiter_thresh_tlv),
+SOC_SINGLE_TLV("BDE LVL3 Limiter Thresh", MAX98373_R20AE_BDE_L3_CFG_1,
+	0, 0xF, 0, max98373_limiter_thresh_tlv),
+SOC_SINGLE_TLV("BDE LVL4 Limiter Thresh", MAX98373_R20B1_BDE_L4_CFG_1,
+	0, 0xF, 0, max98373_limiter_thresh_tlv),
+/* Limiter */
+SOC_SINGLE("Limiter Switch", MAX98373_R20E2_LIMITER_EN,
+	MAX98373_LIMITER_EN_SHIFT, 1, 0),
+SOC_SINGLE("Limiter Src Switch", MAX98373_R20E0_LIMITER_THRESH_CFG,
+	MAX98373_LIMITER_THRESH_SRC_SHIFT, 1, 0),
+SOC_SINGLE_TLV("Limiter Thresh", MAX98373_R20E0_LIMITER_THRESH_CFG,
+	MAX98373_LIMITER_THRESH_SHIFT, 15, 0, max98373_limiter_thresh_tlv),
+SOC_ENUM("Limiter Attack Rate", max98373_limiter_attack_rate_enum),
+SOC_ENUM("Limiter Release Rate", max98373_limiter_release_rate_enum),
+};
+
+static const struct snd_soc_dapm_route max98373_audio_map[] = {
+	/* Playback */
+	{"DAI Sel Mux", "Left", "Amp Enable"},
+	{"DAI Sel Mux", "Right", "Amp Enable"},
+	{"DAI Sel Mux", "LeftRight", "Amp Enable"},
+	{"BE_OUT", NULL, "DAI Sel Mux"},
+	/* Capture */
+	{ "VI Sense", "Switch", "VMON" },
+	{ "VI Sense", "Switch", "IMON" },
+	{ "SpkFB Sense", "Switch", "FBMON" },
+	{ "Voltage Sense", NULL, "VI Sense" },
+	{ "Current Sense", NULL, "VI Sense" },
+	{ "Speaker FB Sense", NULL, "SpkFB Sense" },
+};
+
+static struct snd_soc_dai_driver max98373_dai[] = {
+	{
+		.name = "max98373-aif1",
+		.playback = {
+			.stream_name = "HiFi Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MAX98373_RATES,
+			.formats = MAX98373_FORMATS,
+		},
+		.capture = {
+			.stream_name = "HiFi Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = MAX98373_RATES,
+			.formats = MAX98373_FORMATS,
+		},
+		.ops = &max98373_dai_ops,
+	}
+};
+
+static int max98373_probe(struct snd_soc_codec *codec)
+{
+	struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+
+	codec->control_data = max98373->regmap;
+
+	/* Software Reset */
+	regmap_write(max98373->regmap,
+		MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+
+	/* IV default slot configuration: tri-state all TX slots for now */
+	regmap_write(max98373->regmap,
+		MAX98373_R2020_PCM_TX_HIZ_EN_1,
+		0xFF);
+	regmap_write(max98373->regmap,
+		MAX98373_R2021_PCM_TX_HIZ_EN_2,
+		0xFF);
+	/* L/R mix configuration */
+	regmap_write(max98373->regmap,
+		MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1,
+		0x80);
+	regmap_write(max98373->regmap,
+		MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2,
+		0x1);
+	/* Set initial volume (0dB) */
+	regmap_write(max98373->regmap,
+		MAX98373_R203D_AMP_DIG_VOL_CTRL,
+		0x00);
+	regmap_write(max98373->regmap,
+		MAX98373_R203E_AMP_PATH_GAIN,
+		0x00);
+	/* Enable DC blocker */
+	regmap_write(max98373->regmap,
+		MAX98373_R203F_AMP_DSP_CFG,
+		0x3);
+	/* Enable IMON VMON DC blocker */
+	regmap_write(max98373->regmap,
+		MAX98373_R2046_IV_SENSE_ADC_DSP_CFG,
+		0x7);
+	/* voltage, current slot configuration */
+	regmap_write(max98373->regmap,
+		MAX98373_R2022_PCM_TX_SRC_1,
+		(max98373->i_slot << MAX98373_PCM_TX_CH_SRC_A_I_SHIFT |
+		max98373->v_slot) & 0xFF);
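+	/* release Hi-Z on the selected voltage and current sense slots */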
+	if (max98373->v_slot < 8)
+		regmap_update_bits(max98373->regmap,
+			MAX98373_R2020_PCM_TX_HIZ_EN_1,
+			1 << max98373->v_slot, 0);
+	else
+		regmap_update_bits(max98373->regmap,
+			MAX98373_R2021_PCM_TX_HIZ_EN_2,
+			1 << (max98373->v_slot - 8), 0);
+
+	if (max98373->i_slot < 8)
+		regmap_update_bits(max98373->regmap,
+			MAX98373_R2020_PCM_TX_HIZ_EN_1,
+			1 << max98373->i_slot, 0);
+	else
+		regmap_update_bits(max98373->regmap,
+			MAX98373_R2021_PCM_TX_HIZ_EN_2,
+			1 << (max98373->i_slot - 8), 0);
+
+	/* speaker feedback slot configuration */
+	regmap_write(max98373->regmap,
+		MAX98373_R2023_PCM_TX_SRC_2,
+		max98373->spkfb_slot & 0xFF);
+
+	/* Set interleave mode */
+	if (max98373->interleave_mode)
+		regmap_update_bits(max98373->regmap,
+			MAX98373_R2024_PCM_DATA_FMT_CFG,
+			MAX98373_PCM_TX_CH_INTERLEAVE_MASK,
+			MAX98373_PCM_TX_CH_INTERLEAVE_MASK);
+
+	/* Speaker enable */
+	regmap_update_bits(max98373->regmap,
+		MAX98373_R2043_AMP_EN,
+		MAX98373_SPK_EN_MASK, 1);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int max98373_suspend(struct device *dev)
+{
+	struct max98373_priv *max98373 = dev_get_drvdata(dev);
+
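+	/* keep accesses in the cache and flag it for a sync on resume */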
+	regcache_cache_only(max98373->regmap, true);
+	regcache_mark_dirty(max98373->regmap);
+	return 0;
+}
+
+static int max98373_resume(struct device *dev)
+{
+	struct max98373_priv *max98373 = dev_get_drvdata(dev);
+
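+	/* reset the device, then re-enable and sync the register cache */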
+	regmap_write(max98373->regmap,
+		MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+	regcache_cache_only(max98373->regmap, false);
+	regcache_sync(max98373->regmap);
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops max98373_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(max98373_suspend, max98373_resume)
+};
+
+static const struct snd_soc_codec_driver soc_codec_dev_max98373 = {
+	.probe = max98373_probe,
+	.component_driver = {
+		.controls = max98373_snd_controls,
+		.num_controls = ARRAY_SIZE(max98373_snd_controls),
+		.dapm_widgets = max98373_dapm_widgets,
+		.num_dapm_widgets = ARRAY_SIZE(max98373_dapm_widgets),
+		.dapm_routes = max98373_audio_map,
+		.num_dapm_routes = ARRAY_SIZE(max98373_audio_map),
+	},
+};
+
+static const struct regmap_config max98373_regmap = {
+	.reg_bits = 16,
+	.val_bits = 8,
+	.max_register = MAX98373_R21FF_REV_ID,
+	.reg_defaults  = max98373_reg,
+	.num_reg_defaults = ARRAY_SIZE(max98373_reg),
+	.readable_reg = max98373_readable_register,
+	.volatile_reg = max98373_volatile_reg,
+	.cache_type = REGCACHE_RBTREE,
+};
+
+static void max98373_slot_config(struct i2c_client *i2c,
+	struct max98373_priv *max98373)
+{
+	int value;
+	struct device *dev = &i2c->dev;
+
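+	/* V/I sense and speaker feedback TDM slots; default to 0, 1 and 2 */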
+	if (!device_property_read_u32(dev, "maxim,vmon-slot-no", &value))
+		max98373->v_slot = value & 0xF;
+	else
+		max98373->v_slot = 0;
+
+	if (!device_property_read_u32(dev, "maxim,imon-slot-no", &value))
+		max98373->i_slot = value & 0xF;
+	else
+		max98373->i_slot = 1;
+
+	if (!device_property_read_u32(dev, "maxim,spkfb-slot-no", &value))
+		max98373->spkfb_slot = value & 0xF;
+	else
+		max98373->spkfb_slot = 2;
+}
+
+static int max98373_i2c_probe(struct i2c_client *i2c,
+	const struct i2c_device_id *id)
+{
+	int ret = 0;
+	unsigned int reg = 0;
+	struct max98373_priv *max98373 = NULL;
+
+	max98373 = devm_kzalloc(&i2c->dev, sizeof(*max98373), GFP_KERNEL);
+
+	if (!max98373) {
+		ret = -ENOMEM;
+		return ret;
+	}
+	i2c_set_clientdata(i2c, max98373);
+
+	/* update interleave mode info */
+	if (device_property_read_bool(&i2c->dev, "maxim,interleave_mode"))
+		max98373->interleave_mode = 1;
+	else
+		max98373->interleave_mode = 0;
+
+	/* regmap initialization */
+	max98373->regmap
+		= devm_regmap_init_i2c(i2c, &max98373_regmap);
+	if (IS_ERR(max98373->regmap)) {
+		ret = PTR_ERR(max98373->regmap);
+		dev_err(&i2c->dev,
+			"Failed to allocate regmap: %d\n", ret);
+		return ret;
+	}
+
+	/* Check Revision ID */
+	ret = regmap_read(max98373->regmap,
+		MAX98373_R21FF_REV_ID, &reg);
+	if (ret < 0) {
+		dev_err(&i2c->dev,
+			"Failed to read: 0x%02X\n", MAX98373_R21FF_REV_ID);
+		return ret;
+	}
+	dev_info(&i2c->dev, "MAX98373 revision ID: 0x%02X\n", reg);
+
+	/* voltage/current slot configuration */
+	max98373_slot_config(i2c, max98373);
+
+	/* codec registration */
+	ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_max98373,
+		max98373_dai, ARRAY_SIZE(max98373_dai));
+	if (ret < 0)
+		dev_err(&i2c->dev, "Failed to register codec: %d\n", ret);
+
+	return ret;
+}
+
+static int max98373_i2c_remove(struct i2c_client *client)
+{
+	snd_soc_unregister_codec(&client->dev);
+	return 0;
+}
+
+static const struct i2c_device_id max98373_i2c_id[] = {
+	{ "max98373", 0},
+	{ },
+};
+
+MODULE_DEVICE_TABLE(i2c, max98373_i2c_id);
+
+#if defined(CONFIG_OF)
+static const struct of_device_id max98373_of_match[] = {
+	{ .compatible = "maxim,max98373", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, max98373_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id max98373_acpi_match[] = {
+	{ "MX98373", 0 },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, max98373_acpi_match);
+#endif
+
+static struct i2c_driver max98373_i2c_driver = {
+	.driver = {
+		.name = "max98373",
+		.of_match_table = of_match_ptr(max98373_of_match),
+		.acpi_match_table = ACPI_PTR(max98373_acpi_match),
+		.pm = &max98373_pm,
+	},
+	.probe = max98373_i2c_probe,
+	.remove = max98373_i2c_remove,
+	.id_table = max98373_i2c_id,
+};
+
+module_i2c_driver(max98373_i2c_driver);
+
+MODULE_DESCRIPTION("ALSA SoC MAX98373 driver");
+MODULE_AUTHOR("Ryan Lee <ryans.lee@maximintegrated.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/max98373.h b/sound/soc/codecs/max98373.h
new file mode 100644
index 0000000000000000000000000000000000000000..d0b359d0cf8c6c12dfd7571ae635109574d32aed
--- /dev/null
+++ b/sound/soc/codecs/max98373.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017, Maxim Integrated */
+#ifndef _MAX98373_H
+#define _MAX98373_H
+
+#define MAX98373_R2000_SW_RESET 0x2000
+#define MAX98373_R2001_INT_RAW1 0x2001
+#define MAX98373_R2002_INT_RAW2 0x2002
+#define MAX98373_R2003_INT_RAW3 0x2003
+#define MAX98373_R2004_INT_STATE1 0x2004
+#define MAX98373_R2005_INT_STATE2 0x2005
+#define MAX98373_R2006_INT_STATE3 0x2006
+#define MAX98373_R2007_INT_FLAG1 0x2007
+#define MAX98373_R2008_INT_FLAG2 0x2008
+#define MAX98373_R2009_INT_FLAG3 0x2009
+#define MAX98373_R200A_INT_EN1 0x200A
+#define MAX98373_R200B_INT_EN2 0x200B
+#define MAX98373_R200C_INT_EN3 0x200C
+#define MAX98373_R200D_INT_FLAG_CLR1 0x200D
+#define MAX98373_R200E_INT_FLAG_CLR2 0x200E
+#define MAX98373_R200F_INT_FLAG_CLR3 0x200F
+#define MAX98373_R2010_IRQ_CTRL 0x2010
+#define MAX98373_R2014_THERM_WARN_THRESH 0x2014
+#define MAX98373_R2015_THERM_SHDN_THRESH 0x2015
+#define MAX98373_R2016_THERM_HYSTERESIS 0x2016
+#define MAX98373_R2017_THERM_FOLDBACK_SET 0x2017
+#define MAX98373_R2018_THERM_FOLDBACK_EN 0x2018
+#define MAX98373_R201E_PIN_DRIVE_STRENGTH 0x201E
+#define MAX98373_R2020_PCM_TX_HIZ_EN_1 0x2020
+#define MAX98373_R2021_PCM_TX_HIZ_EN_2 0x2021
+#define MAX98373_R2022_PCM_TX_SRC_1 0x2022
+#define MAX98373_R2023_PCM_TX_SRC_2 0x2023
+#define MAX98373_R2024_PCM_DATA_FMT_CFG	0x2024
+#define MAX98373_R2025_AUDIO_IF_MODE 0x2025
+#define MAX98373_R2026_PCM_CLOCK_RATIO 0x2026
+#define MAX98373_R2027_PCM_SR_SETUP_1 0x2027
+#define MAX98373_R2028_PCM_SR_SETUP_2 0x2028
+#define MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1 0x2029
+#define MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2 0x202A
+#define MAX98373_R202B_PCM_RX_EN 0x202B
+#define MAX98373_R202C_PCM_TX_EN 0x202C
+#define MAX98373_R202E_ICC_RX_CH_EN_1 0x202E
+#define MAX98373_R202F_ICC_RX_CH_EN_2 0x202F
+#define MAX98373_R2030_ICC_TX_HIZ_EN_1 0x2030
+#define MAX98373_R2031_ICC_TX_HIZ_EN_2 0x2031
+#define MAX98373_R2032_ICC_LINK_EN_CFG 0x2032
+#define MAX98373_R2034_ICC_TX_CNTL 0x2034
+#define MAX98373_R2035_ICC_TX_EN 0x2035
+#define MAX98373_R2036_SOUNDWIRE_CTRL 0x2036
+#define MAX98373_R203D_AMP_DIG_VOL_CTRL 0x203D
+#define MAX98373_R203E_AMP_PATH_GAIN 0x203E
+#define MAX98373_R203F_AMP_DSP_CFG 0x203F
+#define MAX98373_R2040_TONE_GEN_CFG 0x2040
+#define MAX98373_R2041_AMP_CFG 0x2041
+#define MAX98373_R2042_AMP_EDGE_RATE_CFG 0x2042
+#define MAX98373_R2043_AMP_EN 0x2043
+#define MAX98373_R2046_IV_SENSE_ADC_DSP_CFG 0x2046
+#define MAX98373_R2047_IV_SENSE_ADC_EN 0x2047
+#define MAX98373_R2051_MEAS_ADC_SAMPLING_RATE 0x2051
+#define MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG 0x2052
+#define MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG 0x2053
+#define MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK 0x2054
+#define MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK 0x2055
+#define MAX98373_R2056_MEAS_ADC_PVDD_CH_EN 0x2056
+#define MAX98373_R2090_BDE_LVL_HOLD 0x2090
+#define MAX98373_R2091_BDE_GAIN_ATK_REL_RATE 0x2091
+#define MAX98373_R2092_BDE_CLIPPER_MODE 0x2092
+#define MAX98373_R2097_BDE_L1_THRESH 0x2097
+#define MAX98373_R2098_BDE_L2_THRESH 0x2098
+#define MAX98373_R2099_BDE_L3_THRESH 0x2099
+#define MAX98373_R209A_BDE_L4_THRESH 0x209A
+#define MAX98373_R209B_BDE_THRESH_HYST 0x209B
+#define MAX98373_R20A8_BDE_L1_CFG_1 0x20A8
+#define MAX98373_R20A9_BDE_L1_CFG_2 0x20A9
+#define MAX98373_R20AA_BDE_L1_CFG_3 0x20AA
+#define MAX98373_R20AB_BDE_L2_CFG_1 0x20AB
+#define MAX98373_R20AC_BDE_L2_CFG_2 0x20AC
+#define MAX98373_R20AD_BDE_L2_CFG_3 0x20AD
+#define MAX98373_R20AE_BDE_L3_CFG_1 0x20AE
+#define MAX98373_R20AF_BDE_L3_CFG_2 0x20AF
+#define MAX98373_R20B0_BDE_L3_CFG_3 0x20B0
+#define MAX98373_R20B1_BDE_L4_CFG_1 0x20B1
+#define MAX98373_R20B2_BDE_L4_CFG_2 0x20B2
+#define MAX98373_R20B3_BDE_L4_CFG_3 0x20B3
+#define MAX98373_R20B4_BDE_INFINITE_HOLD_RELEASE 0x20B4
+#define MAX98373_R20B5_BDE_EN 0x20B5
+#define MAX98373_R20B6_BDE_CUR_STATE_READBACK 0x20B6
+#define MAX98373_R20D1_DHT_CFG 0x20D1
+#define MAX98373_R20D2_DHT_ATTACK_CFG 0x20D2
+#define MAX98373_R20D3_DHT_RELEASE_CFG 0x20D3
+#define MAX98373_R20D4_DHT_EN 0x20D4
+#define MAX98373_R20E0_LIMITER_THRESH_CFG 0x20E0
+#define MAX98373_R20E1_LIMITER_ATK_REL_RATES 0x20E1
+#define MAX98373_R20E2_LIMITER_EN 0x20E2
+#define MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG 0x20FE
+#define MAX98373_R20FF_GLOBAL_SHDN 0x20FF
+#define MAX98373_R21FF_REV_ID 0x21FF
+
+/* MAX98373_R2022_PCM_TX_SRC_1 */
+#define MAX98373_PCM_TX_CH_SRC_A_V_SHIFT (0)
+#define MAX98373_PCM_TX_CH_SRC_A_I_SHIFT (4)
+
+/* MAX98373_R2024_PCM_DATA_FMT_CFG */
+#define MAX98373_PCM_MODE_CFG_FORMAT_MASK (0x7 << 3)
+#define MAX98373_PCM_MODE_CFG_FORMAT_SHIFT (3)
+#define MAX98373_PCM_TX_CH_INTERLEAVE_MASK (0x1 << 2)
+#define MAX98373_PCM_FORMAT_I2S (0x0 << 0)
+#define MAX98373_PCM_FORMAT_LJ (0x1 << 0)
+#define MAX98373_PCM_FORMAT_TDM_MODE0 (0x3 << 0)
+#define MAX98373_PCM_FORMAT_TDM_MODE1 (0x4 << 0)
+#define MAX98373_PCM_FORMAT_TDM_MODE2 (0x5 << 0)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_MASK (0x3 << 6)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_16 (0x1 << 6)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_24 (0x2 << 6)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_32 (0x3 << 6)
+
+/* MAX98373_R2026_PCM_CLOCK_RATIO */
+#define MAX98373_PCM_MODE_CFG_PCM_BCLKEDGE (0x1 << 4)
+#define MAX98373_PCM_CLK_SETUP_BSEL_MASK (0xF << 0)
+
+/* MAX98373_R2027_PCM_SR_SETUP_1 */
+#define MAX98373_PCM_SR_SET1_SR_MASK (0xF << 0)
+#define MAX98373_PCM_SR_SET1_SR_8000 (0x0 << 0)
+#define MAX98373_PCM_SR_SET1_SR_11025 (0x1 << 0)
+#define MAX98373_PCM_SR_SET1_SR_12000 (0x2 << 0)
+#define MAX98373_PCM_SR_SET1_SR_16000 (0x3 << 0)
+#define MAX98373_PCM_SR_SET1_SR_22050 (0x4 << 0)
+#define MAX98373_PCM_SR_SET1_SR_24000 (0x5 << 0)
+#define MAX98373_PCM_SR_SET1_SR_32000 (0x6 << 0)
+#define MAX98373_PCM_SR_SET1_SR_44100 (0x7 << 0)
+#define MAX98373_PCM_SR_SET1_SR_48000 (0x8 << 0)
+
+/* MAX98373_R2028_PCM_SR_SETUP_2 */
+#define MAX98373_PCM_SR_SET2_SR_MASK (0xF << 4)
+#define MAX98373_PCM_SR_SET2_SR_SHIFT (4)
+#define MAX98373_PCM_SR_SET2_IVADC_SR_MASK (0xF << 0)
+
+/* MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1 */
+#define MAX98373_PCM_TO_SPK_MONOMIX_CFG_MASK (0x3 << 6)
+#define MAX98373_PCM_TO_SPK_MONOMIX_CFG_SHIFT (6)
+#define MAX98373_PCM_TO_SPK_CH0_SRC_MASK (0xF << 0)
+
+/* MAX98373_R203E_AMP_PATH_GAIN */
+#define MAX98373_SPK_DIGI_GAIN_MASK (0xF << 4)
+#define MAX98373_SPK_DIGI_GAIN_SHIFT (4)
+#define MAX98373_FS_GAIN_MAX_MASK (0xF << 0)
+#define MAX98373_FS_GAIN_MAX_SHIFT (0)
+
+/* MAX98373_R203F_AMP_DSP_CFG */
+#define MAX98373_AMP_DSP_CFG_DCBLK_SHIFT (0)
+#define MAX98373_AMP_DSP_CFG_DITH_SHIFT (1)
+#define MAX98373_AMP_DSP_CFG_RMP_UP_SHIFT (2)
+#define MAX98373_AMP_DSP_CFG_RMP_DN_SHIFT (3)
+#define MAX98373_AMP_DSP_CFG_DAC_INV_SHIFT (5)
+#define MAX98373_AMP_VOL_SEL_SHIFT (7)
+
+/* MAX98373_R2043_AMP_EN */
+#define MAX98373_SPKFB_EN_MASK (0x1 << 1)
+#define MAX98373_SPK_EN_MASK (0x1 << 0)
+#define MAX98373_SPKFB_EN_SHIFT (1)
+
+/* MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG */
+#define MAX98373_FLT_EN_SHIFT (4)
+
+/* MAX98373_R20B2_BDE_L4_CFG_2 */
+#define MAX98373_LVL4_MUTE_EN_SHIFT (7)
+#define MAX98373_LVL4_HOLD_EN_SHIFT (6)
+
+/* MAX98373_R20B5_BDE_EN */
+#define MAX98373_BDE_EN_SHIFT (0)
+
+/* MAX98373_R20D1_DHT_CFG */
+#define MAX98373_DHT_SPK_GAIN_MIN_SHIFT	(4)
+#define MAX98373_DHT_ROT_PNT_SHIFT	(0)
+
+/* MAX98373_R20D2_DHT_ATTACK_CFG */
+#define MAX98373_DHT_ATTACK_STEP_SHIFT (3)
+#define MAX98373_DHT_ATTACK_RATE_SHIFT (0)
+
+/* MAX98373_R20D3_DHT_RELEASE_CFG */
+#define MAX98373_DHT_RELEASE_STEP_SHIFT (3)
+#define MAX98373_DHT_RELEASE_RATE_SHIFT (0)
+
+/* MAX98373_R20D4_DHT_EN */
+#define MAX98373_DHT_EN_SHIFT (0)
+
+/* MAX98373_R20E0_LIMITER_THRESH_CFG */
+#define MAX98373_LIMITER_THRESH_SHIFT (2)
+#define MAX98373_LIMITER_THRESH_SRC_SHIFT (0)
+
+/* MAX98373_R20E2_LIMITER_EN */
+#define MAX98373_LIMITER_EN_SHIFT (0)
+
+/* MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG */
+#define MAX98373_CLOCK_MON_SHIFT (0)
+
+/* MAX98373_R20FF_GLOBAL_SHDN */
+#define MAX98373_GLOBAL_EN_MASK (0x1 << 0)
+
+/* MAX98373_R2000_SW_RESET */
+#define MAX98373_SOFT_RESET (0x1 << 0)
+
+struct max98373_priv {
+	struct regmap *regmap;
+	unsigned int v_slot;
+	unsigned int i_slot;
+	unsigned int spkfb_slot;
+	bool interleave_mode;
+	unsigned int ch_size;
+	bool tdm_mode;
+};
+#endif
diff --git a/sound/soc/codecs/max98926.c b/sound/soc/codecs/max98926.c
index 03d07bf4d942198af925cb33fe5d65de9883f3bf..7b1d1b0fa8796d379e650cbdf6c0eda623c6aac7 100644
--- a/sound/soc/codecs/max98926.c
+++ b/sound/soc/codecs/max98926.c
@@ -490,7 +490,7 @@ static int max98926_probe(struct snd_soc_codec *codec)
 	struct max98926_priv *max98926 = snd_soc_codec_get_drvdata(codec);
 
 	max98926->codec = codec;
-	codec->control_data = max98926->regmap;
+
 	/* Hi-Z all the slots */
 	regmap_write(max98926->regmap, MAX98926_DOUT_HIZ_CFG4, 0xF0);
 	return 0;
diff --git a/sound/soc/codecs/max98927.c b/sound/soc/codecs/max98927.c
index a1d39353719d72cb4e3f0f1fcaa6b9b69d16acbf..f701fdc81175fca40a55d3a0f24d5a74171f150b 100644
--- a/sound/soc/codecs/max98927.c
+++ b/sound/soc/codecs/max98927.c
@@ -682,7 +682,6 @@ static int max98927_probe(struct snd_soc_codec *codec)
 	struct max98927_priv *max98927 = snd_soc_codec_get_drvdata(codec);
 
 	max98927->codec = codec;
-	codec->control_data = max98927->regmap;
 
 	/* Software Reset */
 	regmap_write(max98927->regmap,
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index 4fd8d1dc4eefca8624f33819706de3fc79996eb1..be7a45f05bbffdaa861495f20834a8de03032304 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -610,6 +610,9 @@ static int mc13783_probe(struct snd_soc_codec *codec)
 {
 	struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec);
 
+	snd_soc_codec_init_regmap(codec,
+				  dev_get_regmap(codec->dev->parent, NULL));
+
 	/* these are the reset values */
 	mc13xxx_reg_write(priv->mc13xxx, MC13783_AUDIO_RX0, 0x25893);
 	mc13xxx_reg_write(priv->mc13xxx, MC13783_AUDIO_RX1, 0x00d35A);
@@ -728,15 +731,9 @@ static struct snd_soc_dai_driver mc13783_dai_sync[] = {
 	}
 };
 
-static struct regmap *mc13783_get_regmap(struct device *dev)
-{
-	return dev_get_regmap(dev->parent, NULL);
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_mc13783 = {
 	.probe		= mc13783_probe,
 	.remove		= mc13783_remove,
-	.get_regmap	= mc13783_get_regmap,
 	.component_driver = {
 		.controls		= mc13783_control_list,
 		.num_controls		= ARRAY_SIZE(mc13783_control_list),
diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
index 066ea2f4ce7b02a8f2cc58c2920b2ef7b6f2b160..44062bb7bf2fc1a3ff797b00314662fbaa591ba6 100644
--- a/sound/soc/codecs/msm8916-wcd-analog.c
+++ b/sound/soc/codecs/msm8916-wcd-analog.c
@@ -712,6 +712,8 @@ static int pm8916_wcd_analog_probe(struct snd_soc_codec *codec)
 		return err;
 	}
 
+	snd_soc_codec_init_regmap(codec,
+				  dev_get_regmap(codec->dev->parent, NULL));
 	snd_soc_codec_set_drvdata(codec, priv);
 	priv->pmic_rev = snd_soc_read(codec, CDC_D_REVISION1);
 	priv->codec_version = snd_soc_read(codec, CDC_D_PERPH_SUBTYPE);
@@ -943,11 +945,6 @@ static int pm8916_wcd_analog_set_jack(struct snd_soc_codec *codec,
 	return 0;
 }
 
-static struct regmap *pm8916_get_regmap(struct device *dev)
-{
-	return dev_get_regmap(dev->parent, NULL);
-}
-
 static irqreturn_t mbhc_btn_release_irq_handler(int irq, void *arg)
 {
 	struct pm8916_wcd_analog_priv *priv = arg;
@@ -1082,7 +1079,6 @@ static const struct snd_soc_codec_driver pm8916_wcd_analog = {
 	.probe = pm8916_wcd_analog_probe,
 	.remove = pm8916_wcd_analog_remove,
 	.set_jack = pm8916_wcd_analog_set_jack,
-	.get_regmap = pm8916_get_regmap,
 	.component_driver = {
 		.controls = pm8916_wcd_analog_snd_controls,
 		.num_controls = ARRAY_SIZE(pm8916_wcd_analog_snd_controls),
diff --git a/sound/soc/codecs/nau8540.c b/sound/soc/codecs/nau8540.c
index f9c9933acffb60ee50c9c7ec062e3cc278bedf60..b08fb7e243c3ce3ca75d08a78f79a4e7ce6b8e4f 100644
--- a/sound/soc/codecs/nau8540.c
+++ b/sound/soc/codecs/nau8540.c
@@ -233,6 +233,41 @@ static SOC_ENUM_SINGLE_DECL(
 static const struct snd_kcontrol_new digital_ch1_mux =
 	SOC_DAPM_ENUM("Digital CH1 Select", digital_ch1_enum);
 
+static int adc_power_control(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *k, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct nau8540 *nau8540 = snd_soc_codec_get_drvdata(codec);
+
+	if (SND_SOC_DAPM_EVENT_ON(event)) {
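+		/* delay to let the ADC power up before enabling the outputs */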
+		msleep(300);
+		/* DO12 and DO34 pad output enable */
+		regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL1,
+			NAU8540_I2S_DO12_TRI, 0);
+		regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL2,
+			NAU8540_I2S_DO34_TRI, 0);
+	} else if (SND_SOC_DAPM_EVENT_OFF(event)) {
+		regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL1,
+			NAU8540_I2S_DO12_TRI, NAU8540_I2S_DO12_TRI);
+		regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL2,
+			NAU8540_I2S_DO34_TRI, NAU8540_I2S_DO34_TRI);
+	}
+	return 0;
+}
+
+static int aiftx_power_control(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *k, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct nau8540 *nau8540 = snd_soc_codec_get_drvdata(codec);
+
+	if (SND_SOC_DAPM_EVENT_OFF(event)) {
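+		/* toggle the software reset when the capture stream stops */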
+		regmap_write(nau8540->regmap, NAU8540_REG_RST, 0x0001);
+		regmap_write(nau8540->regmap, NAU8540_REG_RST, 0x0000);
+	}
+	return 0;
+}
+
 static const struct snd_soc_dapm_widget nau8540_dapm_widgets[] = {
 	SND_SOC_DAPM_SUPPLY("MICBIAS2", NAU8540_REG_MIC_BIAS, 11, 0, NULL, 0),
 	SND_SOC_DAPM_SUPPLY("MICBIAS1", NAU8540_REG_MIC_BIAS, 10, 0, NULL, 0),
@@ -247,14 +282,18 @@ static const struct snd_soc_dapm_widget nau8540_dapm_widgets[] = {
 	SND_SOC_DAPM_PGA("Frontend PGA3", NAU8540_REG_PWR, 14, 0, NULL, 0),
 	SND_SOC_DAPM_PGA("Frontend PGA4", NAU8540_REG_PWR, 15, 0, NULL, 0),
 
-	SND_SOC_DAPM_ADC("ADC1", NULL,
-		NAU8540_REG_POWER_MANAGEMENT, 0, 0),
-	SND_SOC_DAPM_ADC("ADC2", NULL,
-		NAU8540_REG_POWER_MANAGEMENT, 1, 0),
-	SND_SOC_DAPM_ADC("ADC3", NULL,
-		NAU8540_REG_POWER_MANAGEMENT, 2, 0),
-	SND_SOC_DAPM_ADC("ADC4", NULL,
-		NAU8540_REG_POWER_MANAGEMENT, 3, 0),
+	SND_SOC_DAPM_ADC_E("ADC1", NULL,
+		NAU8540_REG_POWER_MANAGEMENT, 0, 0, adc_power_control,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_ADC_E("ADC2", NULL,
+		NAU8540_REG_POWER_MANAGEMENT, 1, 0, adc_power_control,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_ADC_E("ADC3", NULL,
+		NAU8540_REG_POWER_MANAGEMENT, 2, 0, adc_power_control,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_ADC_E("ADC4", NULL,
+		NAU8540_REG_POWER_MANAGEMENT, 3, 0, adc_power_control,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 
 	SND_SOC_DAPM_PGA("ADC CH1", NAU8540_REG_ANALOG_PWR, 0, 0, NULL, 0),
 	SND_SOC_DAPM_PGA("ADC CH2", NAU8540_REG_ANALOG_PWR, 1, 0, NULL, 0),
@@ -270,7 +309,8 @@ static const struct snd_soc_dapm_widget nau8540_dapm_widgets[] = {
 	SND_SOC_DAPM_MUX("Digital CH1 Mux",
 		SND_SOC_NOPM, 0, 0, &digital_ch1_mux),
 
-	SND_SOC_DAPM_AIF_OUT("AIFTX", "Capture", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT_E("AIFTX", "Capture", 0, SND_SOC_NOPM, 0, 0,
+		aiftx_power_control, SND_SOC_DAPM_POST_PMD),
 };
 
 static const struct snd_soc_dapm_route nau8540_dapm_routes[] = {
@@ -575,7 +615,8 @@ static void nau8540_fll_apply(struct regmap *regmap,
 		NAU8540_CLK_SRC_MASK | NAU8540_CLK_MCLK_SRC_MASK,
 		NAU8540_CLK_SRC_MCLK | fll_param->mclk_src);
 	regmap_update_bits(regmap, NAU8540_REG_FLL1,
-		NAU8540_FLL_RATIO_MASK, fll_param->ratio);
+		NAU8540_FLL_RATIO_MASK | NAU8540_ICTRL_LATCH_MASK,
+		fll_param->ratio | (0x6 << NAU8540_ICTRL_LATCH_SFT));
 	/* FLL 16-bit fractional input */
 	regmap_write(regmap, NAU8540_REG_FLL2, fll_param->fll_frac);
 	/* FLL 10-bit integer input */
@@ -596,13 +637,14 @@ static void nau8540_fll_apply(struct regmap *regmap,
 			NAU8540_FLL_PDB_DAC_EN | NAU8540_FLL_LOOP_FTR_EN |
 			NAU8540_FLL_FTR_SW_FILTER);
 		regmap_update_bits(regmap, NAU8540_REG_FLL6,
-			NAU8540_SDM_EN, NAU8540_SDM_EN);
+			NAU8540_SDM_EN | NAU8540_CUTOFF500,
+			NAU8540_SDM_EN | NAU8540_CUTOFF500);
 	} else {
 		regmap_update_bits(regmap, NAU8540_REG_FLL5,
 			NAU8540_FLL_PDB_DAC_EN | NAU8540_FLL_LOOP_FTR_EN |
 			NAU8540_FLL_FTR_SW_MASK, NAU8540_FLL_FTR_SW_ACCU);
-		regmap_update_bits(regmap,
-			NAU8540_REG_FLL6, NAU8540_SDM_EN, 0);
+		regmap_update_bits(regmap, NAU8540_REG_FLL6,
+			NAU8540_SDM_EN | NAU8540_CUTOFF500, 0);
 	}
 }
 
@@ -617,17 +659,22 @@ static int nau8540_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
 	switch (pll_id) {
 	case NAU8540_CLK_FLL_MCLK:
 		regmap_update_bits(nau8540->regmap, NAU8540_REG_FLL3,
-			NAU8540_FLL_CLK_SRC_MASK, NAU8540_FLL_CLK_SRC_MCLK);
+			NAU8540_FLL_CLK_SRC_MASK | NAU8540_GAIN_ERR_MASK,
+			NAU8540_FLL_CLK_SRC_MCLK | 0);
 		break;
 
 	case NAU8540_CLK_FLL_BLK:
 		regmap_update_bits(nau8540->regmap, NAU8540_REG_FLL3,
-			NAU8540_FLL_CLK_SRC_MASK, NAU8540_FLL_CLK_SRC_BLK);
+			NAU8540_FLL_CLK_SRC_MASK | NAU8540_GAIN_ERR_MASK,
+			NAU8540_FLL_CLK_SRC_BLK |
+			(0xf << NAU8540_GAIN_ERR_SFT));
 		break;
 
 	case NAU8540_CLK_FLL_FS:
 		regmap_update_bits(nau8540->regmap, NAU8540_REG_FLL3,
-			NAU8540_FLL_CLK_SRC_MASK, NAU8540_FLL_CLK_SRC_FS);
+			NAU8540_FLL_CLK_SRC_MASK | NAU8540_GAIN_ERR_MASK,
+			NAU8540_FLL_CLK_SRC_FS |
+			(0xf << NAU8540_GAIN_ERR_SFT));
 		break;
 
 	default:
@@ -710,9 +757,24 @@ static void nau8540_init_regs(struct nau8540 *nau8540)
 	regmap_update_bits(regmap, NAU8540_REG_CLOCK_CTRL,
 		NAU8540_CLK_ADC_EN | NAU8540_CLK_I2S_EN,
 		NAU8540_CLK_ADC_EN | NAU8540_CLK_I2S_EN);
-	/* ADC OSR selection, CLK_ADC = Fs * OSR */
+	/* ADC OSR selection, CLK_ADC = Fs * OSR;
+	 * Channel time alignment enable.
+	 */
 	regmap_update_bits(regmap, NAU8540_REG_ADC_SAMPLE_RATE,
-		NAU8540_ADC_OSR_MASK, NAU8540_ADC_OSR_64);
+		NAU8540_CH_SYNC | NAU8540_ADC_OSR_MASK,
+		NAU8540_CH_SYNC | NAU8540_ADC_OSR_64);
+	/* PGA input mode selection */
+	regmap_update_bits(regmap, NAU8540_REG_FEPGA1,
+		NAU8540_FEPGA1_MODCH2_SHT | NAU8540_FEPGA1_MODCH1_SHT,
+		NAU8540_FEPGA1_MODCH2_SHT | NAU8540_FEPGA1_MODCH1_SHT);
+	regmap_update_bits(regmap, NAU8540_REG_FEPGA2,
+		NAU8540_FEPGA2_MODCH4_SHT | NAU8540_FEPGA2_MODCH3_SHT,
+		NAU8540_FEPGA2_MODCH4_SHT | NAU8540_FEPGA2_MODCH3_SHT);
+	/* DO12 and DO34 pad output disable */
+	regmap_update_bits(regmap, NAU8540_REG_PCM_CTRL1,
+		NAU8540_I2S_DO12_TRI, NAU8540_I2S_DO12_TRI);
+	regmap_update_bits(regmap, NAU8540_REG_PCM_CTRL2,
+		NAU8540_I2S_DO34_TRI, NAU8540_I2S_DO34_TRI);
 }
 
 static int __maybe_unused nau8540_suspend(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/nau8540.h b/sound/soc/codecs/nau8540.h
index 5db5b224944dd12ceff7df4d8244bc7bfd8d48e1..732b490edf81488da4567fbb062cfa5938702f45 100644
--- a/sound/soc/codecs/nau8540.h
+++ b/sound/soc/codecs/nau8540.h
@@ -100,9 +100,13 @@
 #define NAU8540_CLK_MCLK_SRC_MASK	0xf
 
 /* FLL1 (0x04) */
+#define NAU8540_ICTRL_LATCH_SFT	10
+#define NAU8540_ICTRL_LATCH_MASK	(0x7 << NAU8540_ICTRL_LATCH_SFT)
 #define NAU8540_FLL_RATIO_MASK	0x7f
 
 /* FLL3 (0x06) */
+#define NAU8540_GAIN_ERR_SFT		12
+#define NAU8540_GAIN_ERR_MASK		(0xf << NAU8540_GAIN_ERR_SFT)
 #define NAU8540_FLL_CLK_SRC_SFT	10
 #define NAU8540_FLL_CLK_SRC_MASK	(0x3 << NAU8540_FLL_CLK_SRC_SFT)
 #define NAU8540_FLL_CLK_SRC_MCLK	(0 << NAU8540_FLL_CLK_SRC_SFT)
@@ -127,6 +131,7 @@
 /* FLL6 (0x9) */
 #define NAU8540_DCO_EN			(0x1 << 15)
 #define NAU8540_SDM_EN			(0x1 << 14)
+#define NAU8540_CUTOFF500		(0x1 << 13)
 
 /* PCM_CTRL0 (0x10) */
 #define NAU8540_I2S_BP_SFT		7
@@ -146,6 +151,7 @@
 #define NAU8540_I2S_DF_PCM_AB		0x3
 
 /* PCM_CTRL1 (0x11) */
+#define NAU8540_I2S_DO12_TRI		(0x1 << 15)
 #define NAU8540_I2S_LRC_DIV_SFT	12
 #define NAU8540_I2S_LRC_DIV_MASK	(0x3 << NAU8540_I2S_LRC_DIV_SFT)
 #define NAU8540_I2S_DO12_OE		(0x1 << 4)
@@ -156,6 +162,7 @@
 #define NAU8540_I2S_BLK_DIV_MASK	0x7
 
 /* PCM_CTRL1 (0x12) */
+#define NAU8540_I2S_DO34_TRI		(0x1 << 15)
 #define NAU8540_I2S_DO34_OE		(0x1 << 11)
 #define NAU8540_I2S_TSLOT_L_MASK	0x3ff
 
@@ -165,6 +172,7 @@
 #define NAU8540_TDM_TX_MASK		0xf
 
 /* ADC_SAMPLE_RATE (0x3A) */
+#define NAU8540_CH_SYNC		(0x1 << 14)
 #define NAU8540_ADC_OSR_MASK		0x3
 #define NAU8540_ADC_OSR_256		0x3
 #define NAU8540_ADC_OSR_128		0x2
@@ -183,6 +191,18 @@
 #define NAU8540_PRECHARGE_DIS		(0x1 << 13)
 #define NAU8540_GLOBAL_BIAS_EN	(0x1 << 12)
 
+/* FEPGA1 (0x69) */
+#define NAU8540_FEPGA1_MODCH2_SHT_SFT	7
+#define NAU8540_FEPGA1_MODCH2_SHT	(0x1 << NAU8540_FEPGA1_MODCH2_SHT_SFT)
+#define NAU8540_FEPGA1_MODCH1_SHT_SFT	3
+#define NAU8540_FEPGA1_MODCH1_SHT	(0x1 << NAU8540_FEPGA1_MODCH1_SHT_SFT)
+
+/* FEPGA2 (0x6A) */
+#define NAU8540_FEPGA2_MODCH4_SHT_SFT	7
+#define NAU8540_FEPGA2_MODCH4_SHT	(0x1 << NAU8540_FEPGA2_MODCH4_SHT_SFT)
+#define NAU8540_FEPGA2_MODCH3_SHT_SFT	3
+#define NAU8540_FEPGA2_MODCH3_SHT	(0x1 << NAU8540_FEPGA2_MODCH3_SHT_SFT)
+
 
 /* System Clock Source */
 enum {
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
index 0240759f951c7ea22b06004aa0e26320bba224fa..088e0cef4cb8d469f17c24f22941e045fbc5f1a8 100644
--- a/sound/soc/codecs/nau8824.c
+++ b/sound/soc/codecs/nau8824.c
@@ -43,7 +43,7 @@ static bool nau8824_is_jack_inserted(struct nau8824 *nau8824);
 
 /* the parameter threshold of FLL */
 #define NAU_FREF_MAX 13500000
-#define NAU_FVCO_MAX 124000000
+#define NAU_FVCO_MAX 100000000
 #define NAU_FVCO_MIN 90000000
 
 /* scaling for mclk from sysclk_src output */
@@ -811,7 +811,8 @@ static void nau8824_eject_jack(struct nau8824 *nau8824)
 		NAU8824_JD_SLEEP_MODE, NAU8824_JD_SLEEP_MODE);
 
 	/* Close clock for jack type detection at manual mode */
-	nau8824_config_sysclk(nau8824, NAU8824_CLK_DIS, 0);
+	if (dapm->bias_level < SND_SOC_BIAS_PREPARE)
+		nau8824_config_sysclk(nau8824, NAU8824_CLK_DIS, 0);
 }
 
 static void nau8824_jdet_work(struct work_struct *work)
@@ -843,6 +844,11 @@ static void nau8824_jdet_work(struct work_struct *work)
 	event_mask |= SND_JACK_HEADSET;
 	snd_soc_jack_report(nau8824->jack, event, event_mask);
 
+	/* Enable short key press and release interruption. */
+	regmap_update_bits(regmap, NAU8824_REG_INTERRUPT_SETTING,
+		NAU8824_IRQ_KEY_RELEASE_DIS |
+		NAU8824_IRQ_KEY_SHORT_PRESS_DIS, 0);
+
 	nau8824_sema_release(nau8824);
 }
 
@@ -850,15 +856,15 @@ static void nau8824_setup_auto_irq(struct nau8824 *nau8824)
 {
 	struct regmap *regmap = nau8824->regmap;
 
-	/* Enable jack ejection, short key press and release interruption. */
+	/* Enable jack ejection interruption. */
 	regmap_update_bits(regmap, NAU8824_REG_INTERRUPT_SETTING_1,
 		NAU8824_IRQ_INSERT_EN | NAU8824_IRQ_EJECT_EN,
 		NAU8824_IRQ_EJECT_EN);
 	regmap_update_bits(regmap, NAU8824_REG_INTERRUPT_SETTING,
-		NAU8824_IRQ_EJECT_DIS | NAU8824_IRQ_KEY_RELEASE_DIS |
-		NAU8824_IRQ_KEY_SHORT_PRESS_DIS, 0);
+		NAU8824_IRQ_EJECT_DIS, 0);
 	/* Enable internal VCO needed for interruptions */
-	nau8824_config_sysclk(nau8824, NAU8824_CLK_INTERNAL, 0);
+	if (nau8824->dapm->bias_level < SND_SOC_BIAS_PREPARE)
+		nau8824_config_sysclk(nau8824, NAU8824_CLK_INTERNAL, 0);
 	regmap_update_bits(regmap, NAU8824_REG_ENA_CTRL,
 		NAU8824_JD_SLEEP_MODE, 0);
 }
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index e853a6dfd33b0ee7b8bf700dffea8b1e477f2d09..a1b697b6fb6483a357f68efa5818725a2a6e62e8 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -194,10 +194,10 @@ static const struct reg_default nau8825_reg_defaults[] = {
 
 /* register backup table when cross talk detection */
 static struct reg_default nau8825_xtalk_baktab[] = {
-	{ NAU8825_REG_ADC_DGAIN_CTRL, 0 },
+	{ NAU8825_REG_ADC_DGAIN_CTRL, 0x00cf },
 	{ NAU8825_REG_HSVOL_CTRL, 0 },
-	{ NAU8825_REG_DACL_CTRL, 0 },
-	{ NAU8825_REG_DACR_CTRL, 0 },
+	{ NAU8825_REG_DACL_CTRL, 0x00cf },
+	{ NAU8825_REG_DACR_CTRL, 0x02cf },
 };
 
 static const unsigned short logtable[256] = {
@@ -245,13 +245,14 @@ static const unsigned short logtable[256] = {
  * tasks are allowed to acquire the semaphore, calling this function will
  * put the task to sleep. If the semaphore is not released within the
  * specified number of jiffies, this function returns.
- * Acquires the semaphore without jiffies. If no more tasks are allowed
- * to acquire the semaphore, calling this function will put the task to
- * sleep until the semaphore is released.
  * If the semaphore is not released within the specified number of jiffies,
- * this function returns -ETIME.
- * If the sleep is interrupted by a signal, this function will return -EINTR.
- * It returns 0 if the semaphore was acquired successfully.
+ * this function returns -ETIME. If the sleep is interrupted by a signal,
+ * this function will return -EINTR. It returns 0 if the semaphore was
+ * acquired successfully.
+ *
+ * Acquires the semaphore without jiffies. Tries to acquire the semaphore
+ * atomically. Returns 0 if the semaphore has been acquired successfully
+ * or 1 if it cannot be acquired.
  */
 static int nau8825_sema_acquire(struct nau8825 *nau8825, long timeout)
 {
@@ -262,8 +263,8 @@ static int nau8825_sema_acquire(struct nau8825 *nau8825, long timeout)
 		if (ret < 0)
 			dev_warn(nau8825->dev, "Acquire semaphore timeout\n");
 	} else {
-		ret = down_interruptible(&nau8825->xtalk_sem);
-		if (ret < 0)
+		ret = down_trylock(&nau8825->xtalk_sem);
+		if (ret)
 			dev_warn(nau8825->dev, "Acquire semaphore fail\n");
 	}
 
@@ -454,22 +455,32 @@ static void nau8825_xtalk_backup(struct nau8825 *nau8825)
 {
 	int i;
 
+	if (nau8825->xtalk_baktab_initialized)
+		return;
+
 	/* Backup some register values to backup table */
 	for (i = 0; i < ARRAY_SIZE(nau8825_xtalk_baktab); i++)
 		regmap_read(nau8825->regmap, nau8825_xtalk_baktab[i].reg,
 				&nau8825_xtalk_baktab[i].def);
+
+	nau8825->xtalk_baktab_initialized = true;
 }
 
-static void nau8825_xtalk_restore(struct nau8825 *nau8825)
+static void nau8825_xtalk_restore(struct nau8825 *nau8825, bool cause_cancel)
 {
 	int i, volume;
 
+	if (!nau8825->xtalk_baktab_initialized)
+		return;
+
 	/* Restore register values from backup table; When the driver restores
-	 * the headphone volumem, it needs recover to original level gradually
-	 * with 3dB per step for less pop noise.
+	 * the headphone volume in XTALK_DONE state, it needs to recover to
+	 * the original level gradually with 3dB per step for less pop noise.
+	 * Otherwise, the restore should be done ASAP.
 	 */
 	for (i = 0; i < ARRAY_SIZE(nau8825_xtalk_baktab); i++) {
-		if (nau8825_xtalk_baktab[i].reg == NAU8825_REG_HSVOL_CTRL) {
+		if (!cause_cancel && nau8825_xtalk_baktab[i].reg ==
+			NAU8825_REG_HSVOL_CTRL) {
 			/* Ramping up the volume change to reduce pop noise */
 			volume = nau8825_xtalk_baktab[i].def &
 				NAU8825_HPR_VOL_MASK;
@@ -479,6 +490,8 @@ static void nau8825_xtalk_restore(struct nau8825 *nau8825)
 		regmap_write(nau8825->regmap, nau8825_xtalk_baktab[i].reg,
 				nau8825_xtalk_baktab[i].def);
 	}
+
+	nau8825->xtalk_baktab_initialized = false;
 }
 
 static void nau8825_xtalk_prepare_dac(struct nau8825 *nau8825)
@@ -644,7 +657,7 @@ static void nau8825_xtalk_clean_adc(struct nau8825 *nau8825)
 		NAU8825_POWERUP_ADCL | NAU8825_ADC_VREFSEL_MASK, 0);
 }
 
-static void nau8825_xtalk_clean(struct nau8825 *nau8825)
+static void nau8825_xtalk_clean(struct nau8825 *nau8825, bool cause_cancel)
 {
 	/* Enable internal VCO needed for interruptions */
 	nau8825_configure_sysclk(nau8825, NAU8825_CLK_INTERNAL, 0);
@@ -660,7 +673,7 @@ static void nau8825_xtalk_clean(struct nau8825 *nau8825)
 		NAU8825_I2S_MS_MASK | NAU8825_I2S_LRC_DIV_MASK |
 		NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_SLAVE);
 	/* Restore value of specific register for cross talk */
-	nau8825_xtalk_restore(nau8825);
+	nau8825_xtalk_restore(nau8825, cause_cancel);
 }
 
 static void nau8825_xtalk_imm_start(struct nau8825 *nau8825, int vol)
@@ -779,7 +792,7 @@ static void nau8825_xtalk_measure(struct nau8825 *nau8825)
 		dev_dbg(nau8825->dev, "cross talk sidetone: %x\n", sidetone);
 		regmap_write(nau8825->regmap, NAU8825_REG_DAC_DGAIN_CTRL,
 					(sidetone << 8) | sidetone);
-		nau8825_xtalk_clean(nau8825);
+		nau8825_xtalk_clean(nau8825, false);
 		nau8825->xtalk_state = NAU8825_XTALK_DONE;
 		break;
 	default:
@@ -815,13 +828,14 @@ static void nau8825_xtalk_work(struct work_struct *work)
 
 static void nau8825_xtalk_cancel(struct nau8825 *nau8825)
 {
-	/* If the xtalk_protect is true, that means the process is still
-	 * on going. The driver forces to cancel the cross talk task and
+	/* If the crosstalk is enabled and the process is ongoing,
+	 * the driver forcibly cancels the crosstalk task and
 	 * restores the configuration to original status.
 	 */
-	if (nau8825->xtalk_protect) {
+	if (nau8825->xtalk_enable && nau8825->xtalk_state !=
+		NAU8825_XTALK_DONE) {
 		cancel_work_sync(&nau8825->xtalk_work);
-		nau8825_xtalk_clean(nau8825);
+		nau8825_xtalk_clean(nau8825, true);
 	}
 	/* Reset parameters for cross talk suppression function */
 	nau8825_sema_reset(nau8825);
@@ -1246,8 +1260,10 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
 		regmap_read(nau8825->regmap, NAU8825_REG_DAC_CTRL1, &osr);
 		osr &= NAU8825_DAC_OVERSAMPLE_MASK;
 		if (nau8825_clock_check(nau8825, substream->stream,
-			params_rate(params), osr))
+			params_rate(params), osr)) {
+			nau8825_sema_release(nau8825);
 			return -EINVAL;
+		}
 		regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
 			NAU8825_CLK_DAC_SRC_MASK,
 			osr_dac_sel[osr].clk_src << NAU8825_CLK_DAC_SRC_SFT);
@@ -1255,8 +1271,10 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
 		regmap_read(nau8825->regmap, NAU8825_REG_ADC_RATE, &osr);
 		osr &= NAU8825_ADC_SYNC_DOWN_MASK;
 		if (nau8825_clock_check(nau8825, substream->stream,
-			params_rate(params), osr))
+			params_rate(params), osr)) {
+			nau8825_sema_release(nau8825);
 			return -EINVAL;
+		}
 		regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
 			NAU8825_CLK_ADC_SRC_MASK,
 			osr_adc_sel[osr].clk_src << NAU8825_CLK_ADC_SRC_SFT);
@@ -1273,8 +1291,10 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
 			bclk_div = 1;
 		else if (bclk_fs <= 128)
 			bclk_div = 0;
-		else
+		else {
+			nau8825_sema_release(nau8825);
 			return -EINVAL;
+		}
 		regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
 			NAU8825_I2S_LRC_DIV_MASK | NAU8825_I2S_BLK_DIV_MASK,
 			((bclk_div + 1) << NAU8825_I2S_LRC_DIV_SFT) | bclk_div);
@@ -1294,6 +1314,7 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
 		val_len |= NAU8825_I2S_DL_32;
 		break;
 	default:
+		nau8825_sema_release(nau8825);
 		return -EINVAL;
 	}
 
@@ -1312,8 +1333,6 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
 	struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
 	unsigned int ctrl1_val = 0, ctrl2_val = 0;
 
-	nau8825_sema_acquire(nau8825, 3 * HZ);
-
 	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
 	case SND_SOC_DAIFMT_CBM_CFM:
 		ctrl2_val |= NAU8825_I2S_MS_MASTER;
@@ -1355,6 +1374,8 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
 		return -EINVAL;
 	}
 
+	nau8825_sema_acquire(nau8825, 3 * HZ);
+
 	regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL1,
 		NAU8825_I2S_DL_MASK | NAU8825_I2S_DF_MASK |
 		NAU8825_I2S_BP_MASK | NAU8825_I2S_PCMB_MASK,
@@ -1687,7 +1708,7 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
 	} else if (active_irq & NAU8825_HEADSET_COMPLETION_IRQ) {
 		if (nau8825_is_jack_inserted(regmap)) {
 			event |= nau8825_jack_insert(nau8825);
-			if (!nau8825->xtalk_bypass && !nau8825->high_imped) {
+			if (nau8825->xtalk_enable && !nau8825->high_imped) {
 				/* Apply the cross talk suppression in the
 				 * headset without high impedance.
 				 */
@@ -1701,12 +1722,15 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
 					int ret;
 					nau8825->xtalk_protect = true;
 					ret = nau8825_sema_acquire(nau8825, 0);
-					if (ret < 0)
+					if (ret)
 						nau8825->xtalk_protect = false;
 				}
 				/* Startup cross talk detection process */
-				nau8825->xtalk_state = NAU8825_XTALK_PREPARE;
-				schedule_work(&nau8825->xtalk_work);
+				if (nau8825->xtalk_protect) {
+					nau8825->xtalk_state =
+						NAU8825_XTALK_PREPARE;
+					schedule_work(&nau8825->xtalk_work);
+				}
 			} else {
 				/* The cross talk suppression shouldn't apply
 				 * in the headset with high impedance. Thus,
@@ -1733,7 +1757,9 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
 			nau8825->xtalk_event_mask = event_mask;
 		}
 	} else if (active_irq & NAU8825_IMPEDANCE_MEAS_IRQ) {
-		schedule_work(&nau8825->xtalk_work);
+		/* Crosstalk detection enabled and process ongoing */
+		if (nau8825->xtalk_enable && nau8825->xtalk_protect)
+			schedule_work(&nau8825->xtalk_work);
 		clear_irq = NAU8825_IMPEDANCE_MEAS_IRQ;
 	} else if ((active_irq & NAU8825_JACK_INSERTION_IRQ_MASK) ==
 		NAU8825_JACK_INSERTION_DETECTED) {
@@ -2382,7 +2408,7 @@ static int __maybe_unused nau8825_resume(struct snd_soc_codec *codec)
 	regcache_sync(nau8825->regmap);
 	nau8825->xtalk_protect = true;
 	ret = nau8825_sema_acquire(nau8825, 0);
-	if (ret < 0)
+	if (ret)
 		nau8825->xtalk_protect = false;
 	enable_irq(nau8825->irq);
 
@@ -2441,8 +2467,8 @@ static void nau8825_print_device_properties(struct nau8825 *nau8825)
 			nau8825->jack_insert_debounce);
 	dev_dbg(dev, "jack-eject-debounce:  %d\n",
 			nau8825->jack_eject_debounce);
-	dev_dbg(dev, "crosstalk-bypass:     %d\n",
-			nau8825->xtalk_bypass);
+	dev_dbg(dev, "crosstalk-enable:     %d\n",
+			nau8825->xtalk_enable);
 }
 
 static int nau8825_read_device_properties(struct device *dev,
@@ -2507,8 +2533,8 @@ static int nau8825_read_device_properties(struct device *dev,
 		&nau8825->jack_eject_debounce);
 	if (ret)
 		nau8825->jack_eject_debounce = 0;
-	nau8825->xtalk_bypass = device_property_read_bool(dev,
-		"nuvoton,crosstalk-bypass");
+	nau8825->xtalk_enable = device_property_read_bool(dev,
+		"nuvoton,crosstalk-enable");
 
 	nau8825->mclk = devm_clk_get(dev, "mclk");
 	if (PTR_ERR(nau8825->mclk) == -EPROBE_DEFER) {
@@ -2569,6 +2595,7 @@ static int nau8825_i2c_probe(struct i2c_client *i2c,
 	 */
 	nau8825->xtalk_state = NAU8825_XTALK_DONE;
 	nau8825->xtalk_protect = false;
+	nau8825->xtalk_baktab_initialized = false;
 	sema_init(&nau8825->xtalk_sem, 1);
 	INIT_WORK(&nau8825->xtalk_work, nau8825_xtalk_work);
 
diff --git a/sound/soc/codecs/nau8825.h b/sound/soc/codecs/nau8825.h
index 8aee5c8647aee153f151ac09175dc3f6a9fd25da..f7e7321258825759d2adad0db15e84ced34f698e 100644
--- a/sound/soc/codecs/nau8825.h
+++ b/sound/soc/codecs/nau8825.h
@@ -476,7 +476,8 @@ struct nau8825 {
 	int xtalk_event_mask;
 	bool xtalk_protect;
 	int imp_rms[NAU8825_XTALK_IMM];
-	int xtalk_bypass;
+	int xtalk_enable;
+	bool xtalk_baktab_initialized; /* true if the backup table is valid */
 };
 
 int nau8825_enable_jack_detect(struct snd_soc_codec *codec,
diff --git a/sound/soc/codecs/pcm186x-i2c.c b/sound/soc/codecs/pcm186x-i2c.c
new file mode 100644
index 0000000000000000000000000000000000000000..543621232d604a4227f8df988c34dbc249dfb4fa
--- /dev/null
+++ b/sound/soc/codecs/pcm186x-i2c.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments PCM186x Universal Audio ADC - I2C
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com
+ *	Andreas Dannenberg <dannenberg@ti.com>
+ *	Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+
+#include "pcm186x.h"
+
+static const struct of_device_id pcm186x_of_match[] = {
+	{ .compatible = "ti,pcm1862", .data = (void *)PCM1862 },
+	{ .compatible = "ti,pcm1863", .data = (void *)PCM1863 },
+	{ .compatible = "ti,pcm1864", .data = (void *)PCM1864 },
+	{ .compatible = "ti,pcm1865", .data = (void *)PCM1865 },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, pcm186x_of_match);
+
+static int pcm186x_i2c_probe(struct i2c_client *i2c,
+			     const struct i2c_device_id *id)
+{
+	const enum pcm186x_type type = (enum pcm186x_type)id->driver_data;
+	int irq = i2c->irq;
+	struct regmap *regmap;
+
+	regmap = devm_regmap_init_i2c(i2c, &pcm186x_regmap);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	return pcm186x_probe(&i2c->dev, type, irq, regmap);
+}
+
+static int pcm186x_i2c_remove(struct i2c_client *i2c)
+{
+	pcm186x_remove(&i2c->dev);
+
+	return 0;
+}
+
+static const struct i2c_device_id pcm186x_i2c_id[] = {
+	{ "pcm1862", PCM1862 },
+	{ "pcm1863", PCM1863 },
+	{ "pcm1864", PCM1864 },
+	{ "pcm1865", PCM1865 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, pcm186x_i2c_id);
+
+static struct i2c_driver pcm186x_i2c_driver = {
+	.probe		= pcm186x_i2c_probe,
+	.remove		= pcm186x_i2c_remove,
+	.id_table	= pcm186x_i2c_id,
+	.driver		= {
+		.name	= "pcm186x",
+		.of_match_table = pcm186x_of_match,
+	},
+};
+module_i2c_driver(pcm186x_i2c_driver);
+
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("PCM186x Universal Audio ADC I2C Interface Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm186x-spi.c b/sound/soc/codecs/pcm186x-spi.c
new file mode 100644
index 0000000000000000000000000000000000000000..2366f8e4d4d46c5cbcabd1a17e66f2e1e9336545
--- /dev/null
+++ b/sound/soc/codecs/pcm186x-spi.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments PCM186x Universal Audio ADC - SPI
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com
+ *	Andreas Dannenberg <dannenberg@ti.com>
+ *	Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include "pcm186x.h"
+
+static const struct of_device_id pcm186x_of_match[] = {
+	{ .compatible = "ti,pcm1862", .data = (void *)PCM1862 },
+	{ .compatible = "ti,pcm1863", .data = (void *)PCM1863 },
+	{ .compatible = "ti,pcm1864", .data = (void *)PCM1864 },
+	{ .compatible = "ti,pcm1865", .data = (void *)PCM1865 },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, pcm186x_of_match);
+
+static int pcm186x_spi_probe(struct spi_device *spi)
+{
+	const enum pcm186x_type type =
+			 (enum pcm186x_type)spi_get_device_id(spi)->driver_data;
+	int irq = spi->irq;
+	struct regmap *regmap;
+
+	regmap = devm_regmap_init_spi(spi, &pcm186x_regmap);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	return pcm186x_probe(&spi->dev, type, irq, regmap);
+}
+
+static int pcm186x_spi_remove(struct spi_device *spi)
+{
+	pcm186x_remove(&spi->dev);
+
+	return 0;
+}
+
+static const struct spi_device_id pcm186x_spi_id[] = {
+	{ "pcm1862", PCM1862 },
+	{ "pcm1863", PCM1863 },
+	{ "pcm1864", PCM1864 },
+	{ "pcm1865", PCM1865 },
+	{ }
+};
+MODULE_DEVICE_TABLE(spi, pcm186x_spi_id);
+
+static struct spi_driver pcm186x_spi_driver = {
+	.probe		= pcm186x_spi_probe,
+	.remove		= pcm186x_spi_remove,
+	.id_table	= pcm186x_spi_id,
+	.driver		= {
+		.name	= "pcm186x",
+		.of_match_table = pcm186x_of_match,
+	},
+};
+module_spi_driver(pcm186x_spi_driver);
+
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("PCM186x Universal Audio ADC SPI Interface Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm186x.c b/sound/soc/codecs/pcm186x.c
new file mode 100644
index 0000000000000000000000000000000000000000..cdb51427facc4e162e2d0f709f80bbd6c00f3721
--- /dev/null
+++ b/sound/soc/codecs/pcm186x.c
@@ -0,0 +1,719 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments PCM186x Universal Audio ADC
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com
+ *	Andreas Dannenberg <dannenberg@ti.com>
+ *	Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "pcm186x.h"
+
+static const char * const pcm186x_supply_names[] = {
+	"avdd",		/* Analog power supply. Connect to 3.3-V supply. */
+	"dvdd",		/* Digital power supply. Connect to 3.3-V supply. */
+	"iovdd",	/* I/O power supply. Connect to 3.3-V or 1.8-V. */
+};
+#define PCM186x_NUM_SUPPLIES ARRAY_SIZE(pcm186x_supply_names)
+
+struct pcm186x_priv {
+	struct regmap *regmap;
+	struct regulator_bulk_data supplies[PCM186x_NUM_SUPPLIES];
+	unsigned int sysclk;
+	unsigned int tdm_offset;
+	bool is_tdm_mode;
+	bool is_master_mode;
+};
+
+static const DECLARE_TLV_DB_SCALE(pcm186x_pga_tlv, -1200, 50, 0);
+
+static const struct snd_kcontrol_new pcm1863_snd_controls[] = {
+	SOC_DOUBLE_R_S_TLV("ADC Capture Volume", PCM186X_PGA_VAL_CH1_L,
+			   PCM186X_PGA_VAL_CH1_R, 0, -24, 80, 7, 0,
+			   pcm186x_pga_tlv),
+};
+
+static const struct snd_kcontrol_new pcm1865_snd_controls[] = {
+	SOC_DOUBLE_R_S_TLV("ADC1 Capture Volume", PCM186X_PGA_VAL_CH1_L,
+			   PCM186X_PGA_VAL_CH1_R, 0, -24, 80, 7, 0,
+			   pcm186x_pga_tlv),
+	SOC_DOUBLE_R_S_TLV("ADC2 Capture Volume", PCM186X_PGA_VAL_CH2_L,
+			   PCM186X_PGA_VAL_CH2_R, 0, -24, 80, 7, 0,
+			   pcm186x_pga_tlv),
+};
+
+static const unsigned int pcm186x_adc_input_channel_sel_value[] = {
+	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+	0x10, 0x20, 0x30
+};
+
+static const char * const pcm186x_adcl_input_channel_sel_text[] = {
+	"No Select",
+	"VINL1[SE]",					/* Default for ADC1L */
+	"VINL2[SE]",					/* Default for ADC2L */
+	"VINL2[SE] + VINL1[SE]",
+	"VINL3[SE]",
+	"VINL3[SE] + VINL1[SE]",
+	"VINL3[SE] + VINL2[SE]",
+	"VINL3[SE] + VINL2[SE] + VINL1[SE]",
+	"VINL4[SE]",
+	"VINL4[SE] + VINL1[SE]",
+	"VINL4[SE] + VINL2[SE]",
+	"VINL4[SE] + VINL2[SE] + VINL1[SE]",
+	"VINL4[SE] + VINL3[SE]",
+	"VINL4[SE] + VINL3[SE] + VINL1[SE]",
+	"VINL4[SE] + VINL3[SE] + VINL2[SE]",
+	"VINL4[SE] + VINL3[SE] + VINL2[SE] + VINL1[SE]",
+	"{VIN1P, VIN1M}[DIFF]",
+	"{VIN4P, VIN4M}[DIFF]",
+	"{VIN1P, VIN1M}[DIFF] + {VIN4P, VIN4M}[DIFF]"
+};
+
+static const char * const pcm186x_adcr_input_channel_sel_text[] = {
+	"No Select",
+	"VINR1[SE]",					/* Default for ADC1R */
+	"VINR2[SE]",					/* Default for ADC2R */
+	"VINR2[SE] + VINR1[SE]",
+	"VINR3[SE]",
+	"VINR3[SE] + VINR1[SE]",
+	"VINR3[SE] + VINR2[SE]",
+	"VINR3[SE] + VINR2[SE] + VINR1[SE]",
+	"VINR4[SE]",
+	"VINR4[SE] + VINR1[SE]",
+	"VINR4[SE] + VINR2[SE]",
+	"VINR4[SE] + VINR2[SE] + VINR1[SE]",
+	"VINR4[SE] + VINR3[SE]",
+	"VINR4[SE] + VINR3[SE] + VINR1[SE]",
+	"VINR4[SE] + VINR3[SE] + VINR2[SE]",
+	"VINR4[SE] + VINR3[SE] + VINR2[SE] + VINR1[SE]",
+	"{VIN2P, VIN2M}[DIFF]",
+	"{VIN3P, VIN3M}[DIFF]",
+	"{VIN2P, VIN2M}[DIFF] + {VIN3P, VIN3M}[DIFF]"
+};
+
+static const struct soc_enum pcm186x_adc_input_channel_sel[] = {
+	SOC_VALUE_ENUM_SINGLE(PCM186X_ADC1_INPUT_SEL_L, 0,
+			      PCM186X_ADC_INPUT_SEL_MASK,
+			      ARRAY_SIZE(pcm186x_adcl_input_channel_sel_text),
+			      pcm186x_adcl_input_channel_sel_text,
+			      pcm186x_adc_input_channel_sel_value),
+	SOC_VALUE_ENUM_SINGLE(PCM186X_ADC1_INPUT_SEL_R, 0,
+			      PCM186X_ADC_INPUT_SEL_MASK,
+			      ARRAY_SIZE(pcm186x_adcr_input_channel_sel_text),
+			      pcm186x_adcr_input_channel_sel_text,
+			      pcm186x_adc_input_channel_sel_value),
+	SOC_VALUE_ENUM_SINGLE(PCM186X_ADC2_INPUT_SEL_L, 0,
+			      PCM186X_ADC_INPUT_SEL_MASK,
+			      ARRAY_SIZE(pcm186x_adcl_input_channel_sel_text),
+			      pcm186x_adcl_input_channel_sel_text,
+			      pcm186x_adc_input_channel_sel_value),
+	SOC_VALUE_ENUM_SINGLE(PCM186X_ADC2_INPUT_SEL_R, 0,
+			      PCM186X_ADC_INPUT_SEL_MASK,
+			      ARRAY_SIZE(pcm186x_adcr_input_channel_sel_text),
+			      pcm186x_adcr_input_channel_sel_text,
+			      pcm186x_adc_input_channel_sel_value),
+};
+
+static const struct snd_kcontrol_new pcm186x_adc_mux_controls[] = {
+	SOC_DAPM_ENUM("ADC1 Left Input", pcm186x_adc_input_channel_sel[0]),
+	SOC_DAPM_ENUM("ADC1 Right Input", pcm186x_adc_input_channel_sel[1]),
+	SOC_DAPM_ENUM("ADC2 Left Input", pcm186x_adc_input_channel_sel[2]),
+	SOC_DAPM_ENUM("ADC2 Right Input", pcm186x_adc_input_channel_sel[3]),
+};
+
+static const struct snd_soc_dapm_widget pcm1863_dapm_widgets[] = {
+	SND_SOC_DAPM_INPUT("VINL1"),
+	SND_SOC_DAPM_INPUT("VINR1"),
+	SND_SOC_DAPM_INPUT("VINL2"),
+	SND_SOC_DAPM_INPUT("VINR2"),
+	SND_SOC_DAPM_INPUT("VINL3"),
+	SND_SOC_DAPM_INPUT("VINR3"),
+	SND_SOC_DAPM_INPUT("VINL4"),
+	SND_SOC_DAPM_INPUT("VINR4"),
+
+	SND_SOC_DAPM_MUX("ADC Left Capture Source", SND_SOC_NOPM, 0, 0,
+			 &pcm186x_adc_mux_controls[0]),
+	SND_SOC_DAPM_MUX("ADC Right Capture Source", SND_SOC_NOPM, 0, 0,
+			 &pcm186x_adc_mux_controls[1]),
+
+	/*
+	 * Put the codec into SLEEP mode when not in use, allowing the
+	 * Energysense mechanism to operate.
+	 */
+	SND_SOC_DAPM_ADC("ADC", "HiFi Capture", PCM186X_POWER_CTRL, 1,  0),
+};
+
+static const struct snd_soc_dapm_widget pcm1865_dapm_widgets[] = {
+	SND_SOC_DAPM_INPUT("VINL1"),
+	SND_SOC_DAPM_INPUT("VINR1"),
+	SND_SOC_DAPM_INPUT("VINL2"),
+	SND_SOC_DAPM_INPUT("VINR2"),
+	SND_SOC_DAPM_INPUT("VINL3"),
+	SND_SOC_DAPM_INPUT("VINR3"),
+	SND_SOC_DAPM_INPUT("VINL4"),
+	SND_SOC_DAPM_INPUT("VINR4"),
+
+	SND_SOC_DAPM_MUX("ADC1 Left Capture Source", SND_SOC_NOPM, 0, 0,
+			 &pcm186x_adc_mux_controls[0]),
+	SND_SOC_DAPM_MUX("ADC1 Right Capture Source", SND_SOC_NOPM, 0, 0,
+			 &pcm186x_adc_mux_controls[1]),
+	SND_SOC_DAPM_MUX("ADC2 Left Capture Source", SND_SOC_NOPM, 0, 0,
+			 &pcm186x_adc_mux_controls[2]),
+	SND_SOC_DAPM_MUX("ADC2 Right Capture Source", SND_SOC_NOPM, 0, 0,
+			 &pcm186x_adc_mux_controls[3]),
+
+	/*
+	 * Put the codec into SLEEP mode when not in use, allowing the
+	 * Energysense mechanism to operate.
+	 */
+	SND_SOC_DAPM_ADC("ADC1", "HiFi Capture 1", PCM186X_POWER_CTRL, 1,  0),
+	SND_SOC_DAPM_ADC("ADC2", "HiFi Capture 2", PCM186X_POWER_CTRL, 1,  0),
+};
+
+static const struct snd_soc_dapm_route pcm1863_dapm_routes[] = {
+	{ "ADC Left Capture Source", NULL, "VINL1" },
+	{ "ADC Left Capture Source", NULL, "VINR1" },
+	{ "ADC Left Capture Source", NULL, "VINL2" },
+	{ "ADC Left Capture Source", NULL, "VINR2" },
+	{ "ADC Left Capture Source", NULL, "VINL3" },
+	{ "ADC Left Capture Source", NULL, "VINR3" },
+	{ "ADC Left Capture Source", NULL, "VINL4" },
+	{ "ADC Left Capture Source", NULL, "VINR4" },
+
+	{ "ADC", NULL, "ADC Left Capture Source" },
+
+	{ "ADC Right Capture Source", NULL, "VINL1" },
+	{ "ADC Right Capture Source", NULL, "VINR1" },
+	{ "ADC Right Capture Source", NULL, "VINL2" },
+	{ "ADC Right Capture Source", NULL, "VINR2" },
+	{ "ADC Right Capture Source", NULL, "VINL3" },
+	{ "ADC Right Capture Source", NULL, "VINR3" },
+	{ "ADC Right Capture Source", NULL, "VINL4" },
+	{ "ADC Right Capture Source", NULL, "VINR4" },
+
+	{ "ADC", NULL, "ADC Right Capture Source" },
+};
+
+static const struct snd_soc_dapm_route pcm1865_dapm_routes[] = {
+	{ "ADC1 Left Capture Source", NULL, "VINL1" },
+	{ "ADC1 Left Capture Source", NULL, "VINR1" },
+	{ "ADC1 Left Capture Source", NULL, "VINL2" },
+	{ "ADC1 Left Capture Source", NULL, "VINR2" },
+	{ "ADC1 Left Capture Source", NULL, "VINL3" },
+	{ "ADC1 Left Capture Source", NULL, "VINR3" },
+	{ "ADC1 Left Capture Source", NULL, "VINL4" },
+	{ "ADC1 Left Capture Source", NULL, "VINR4" },
+
+	{ "ADC1", NULL, "ADC1 Left Capture Source" },
+
+	{ "ADC1 Right Capture Source", NULL, "VINL1" },
+	{ "ADC1 Right Capture Source", NULL, "VINR1" },
+	{ "ADC1 Right Capture Source", NULL, "VINL2" },
+	{ "ADC1 Right Capture Source", NULL, "VINR2" },
+	{ "ADC1 Right Capture Source", NULL, "VINL3" },
+	{ "ADC1 Right Capture Source", NULL, "VINR3" },
+	{ "ADC1 Right Capture Source", NULL, "VINL4" },
+	{ "ADC1 Right Capture Source", NULL, "VINR4" },
+
+	{ "ADC1", NULL, "ADC1 Right Capture Source" },
+
+	{ "ADC2 Left Capture Source", NULL, "VINL1" },
+	{ "ADC2 Left Capture Source", NULL, "VINR1" },
+	{ "ADC2 Left Capture Source", NULL, "VINL2" },
+	{ "ADC2 Left Capture Source", NULL, "VINR2" },
+	{ "ADC2 Left Capture Source", NULL, "VINL3" },
+	{ "ADC2 Left Capture Source", NULL, "VINR3" },
+	{ "ADC2 Left Capture Source", NULL, "VINL4" },
+	{ "ADC2 Left Capture Source", NULL, "VINR4" },
+
+	{ "ADC2", NULL, "ADC2 Left Capture Source" },
+
+	{ "ADC2 Right Capture Source", NULL, "VINL1" },
+	{ "ADC2 Right Capture Source", NULL, "VINR1" },
+	{ "ADC2 Right Capture Source", NULL, "VINL2" },
+	{ "ADC2 Right Capture Source", NULL, "VINR2" },
+	{ "ADC2 Right Capture Source", NULL, "VINL3" },
+	{ "ADC2 Right Capture Source", NULL, "VINR3" },
+	{ "ADC2 Right Capture Source", NULL, "VINL4" },
+	{ "ADC2 Right Capture Source", NULL, "VINR4" },
+
+	{ "ADC2", NULL, "ADC2 Right Capture Source" },
+};
+
+static int pcm186x_hw_params(struct snd_pcm_substream *substream,
+			     struct snd_pcm_hw_params *params,
+			     struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+
+	struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+	unsigned int rate = params_rate(params);
+	unsigned int format = params_format(params);
+	unsigned int width = params_width(params);
+	unsigned int channels = params_channels(params);
+	unsigned int div_lrck;
+	unsigned int div_bck;
+	u8 tdm_tx_sel = 0;
+	u8 pcm_cfg = 0;
+
+	dev_dbg(codec->dev, "%s() rate=%u format=0x%x width=%u channels=%u\n",
+		__func__, rate, format, width, channels);
+
+	switch (width) {
+	case 16:
+		pcm_cfg = PCM186X_PCM_CFG_RX_WLEN_16 <<
+			  PCM186X_PCM_CFG_RX_WLEN_SHIFT |
+			  PCM186X_PCM_CFG_TX_WLEN_16 <<
+			  PCM186X_PCM_CFG_TX_WLEN_SHIFT;
+		break;
+	case 20:
+		pcm_cfg = PCM186X_PCM_CFG_RX_WLEN_20 <<
+			  PCM186X_PCM_CFG_RX_WLEN_SHIFT |
+			  PCM186X_PCM_CFG_TX_WLEN_20 <<
+			  PCM186X_PCM_CFG_TX_WLEN_SHIFT;
+		break;
+	case 24:
+		pcm_cfg = PCM186X_PCM_CFG_RX_WLEN_24 <<
+			  PCM186X_PCM_CFG_RX_WLEN_SHIFT |
+			  PCM186X_PCM_CFG_TX_WLEN_24 <<
+			  PCM186X_PCM_CFG_TX_WLEN_SHIFT;
+		break;
+	case 32:
+		pcm_cfg = PCM186X_PCM_CFG_RX_WLEN_32 <<
+			  PCM186X_PCM_CFG_RX_WLEN_SHIFT |
+			  PCM186X_PCM_CFG_TX_WLEN_32 <<
+			  PCM186X_PCM_CFG_TX_WLEN_SHIFT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, PCM186X_PCM_CFG,
+			    PCM186X_PCM_CFG_RX_WLEN_MASK |
+			    PCM186X_PCM_CFG_TX_WLEN_MASK,
+			    pcm_cfg);
+
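+	/* BCKs per LRCK frame: word width times channel count */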
+	div_lrck = width * channels;
+
+	if (priv->is_tdm_mode) {
+		/* Select TDM transmission data */
+		switch (channels) {
+		case 2:
+			tdm_tx_sel = PCM186X_TDM_TX_SEL_2CH;
+			break;
+		case 4:
+			tdm_tx_sel = PCM186X_TDM_TX_SEL_4CH;
+			break;
+		case 6:
+			tdm_tx_sel = PCM186X_TDM_TX_SEL_6CH;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		snd_soc_update_bits(codec, PCM186X_TDM_TX_SEL,
+				    PCM186X_TDM_TX_SEL_MASK, tdm_tx_sel);
+
+		/* In DSP/TDM mode, the LRCLK divider must be 256 */
+		div_lrck = 256;
+
+		/* Configure 1/256 duty cycle for LRCK */
+		snd_soc_update_bits(codec, PCM186X_PCM_CFG,
+				    PCM186X_PCM_CFG_TDM_LRCK_MODE,
+				    PCM186X_PCM_CFG_TDM_LRCK_MODE);
+	}
+
+	/* Only configure clock dividers in master mode. */
+	if (priv->is_master_mode) {
+		div_bck = priv->sysclk / (div_lrck * rate);
+
+		dev_dbg(codec->dev,
+			"%s() master_clk=%u div_bck=%u div_lrck=%u\n",
+			__func__, priv->sysclk, div_bck, div_lrck);
+
+		snd_soc_write(codec, PCM186X_BCK_DIV, div_bck - 1);
+		snd_soc_write(codec, PCM186X_LRK_DIV, div_lrck - 1);
+	}
+
+	return 0;
+}
+
+static int pcm186x_set_fmt(struct snd_soc_dai *dai, unsigned int format)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+	u8 clk_ctrl = 0;
+	u8 pcm_cfg = 0;
+
+	dev_dbg(codec->dev, "%s() format=0x%x\n", __func__, format);
+
+	/* set master/slave audio interface */
+	switch (format & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		if (!priv->sysclk) {
+			dev_err(codec->dev, "operating in master mode requires sysclock to be configured\n");
+			return -EINVAL;
+		}
+		clk_ctrl |= PCM186X_CLK_CTRL_MST_MODE;
+		priv->is_master_mode = true;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		priv->is_master_mode = false;
+		break;
+	default:
+		dev_err(codec->dev, "Invalid DAI master/slave interface\n");
+		return -EINVAL;
+	}
+
+	/* set interface polarity */
+	switch (format & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	default:
+		dev_err(codec->dev, "Inverted DAI clocks not supported\n");
+		return -EINVAL;
+	}
+
+	/* set interface format */
+	switch (format & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		pcm_cfg = PCM186X_PCM_CFG_FMT_I2S;
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		pcm_cfg = PCM186X_PCM_CFG_FMT_LEFTJ;
+		break;
+	case SND_SOC_DAIFMT_DSP_A:
+		priv->tdm_offset += 1;
+		/* Fall through... DSP_A uses the same basic config as DSP_B
+		 * except we need to shift the TDM output by one BCK cycle
+		 */
+	case SND_SOC_DAIFMT_DSP_B:
+		priv->is_tdm_mode = true;
+		pcm_cfg = PCM186X_PCM_CFG_FMT_TDM;
+		break;
+	default:
+		dev_err(codec->dev, "Invalid DAI format\n");
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, PCM186X_CLK_CTRL,
+			    PCM186X_CLK_CTRL_MST_MODE, clk_ctrl);
+
+	snd_soc_write(codec, PCM186X_TDM_TX_OFFSET, priv->tdm_offset);
+
+	snd_soc_update_bits(codec, PCM186X_PCM_CFG,
+			    PCM186X_PCM_CFG_FMT_MASK, pcm_cfg);
+
+	return 0;
+}
+
+static int pcm186x_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+				unsigned int rx_mask, int slots, int slot_width)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+	unsigned int first_slot, last_slot, tdm_offset;
+
+	dev_dbg(codec->dev,
+		"%s() tx_mask=0x%x rx_mask=0x%x slots=%d slot_width=%d\n",
+		__func__, tx_mask, rx_mask, slots, slot_width);
+
+	if (!tx_mask) {
+		dev_err(codec->dev, "tdm tx mask must not be 0\n");
+		return -EINVAL;
+	}
+
+	first_slot = __ffs(tx_mask);
+	last_slot = __fls(tx_mask);
+
+	if (last_slot - first_slot != hweight32(tx_mask) - 1) {
+		dev_err(codec->dev, "tdm tx mask must be contiguous\n");
+		return -EINVAL;
+	}
+
+	tdm_offset = first_slot * slot_width;
+
+	if (tdm_offset > 255) {
+		dev_err(codec->dev, "tdm tx slot selection out of bounds\n");
+		return -EINVAL;
+	}
+
+	priv->tdm_offset = tdm_offset;
+
+	return 0;
+}
+
+static int pcm186x_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
+				  unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s() clk_id=%d freq=%u dir=%d\n",
+		__func__, clk_id, freq, dir);
+
+	priv->sysclk = freq;
+
+	return 0;
+}
+
+static const struct snd_soc_dai_ops pcm186x_dai_ops = {
+	.set_sysclk = pcm186x_set_dai_sysclk,
+	.set_tdm_slot = pcm186x_set_tdm_slot,
+	.set_fmt = pcm186x_set_fmt,
+	.hw_params = pcm186x_hw_params,
+};
+
+static struct snd_soc_dai_driver pcm1863_dai = {
+	.name = "pcm1863-aif",
+	.capture = {
+		 .stream_name = "Capture",
+		 .channels_min = 1,
+		 .channels_max = 2,
+		 .rates = PCM186X_RATES,
+		 .formats = PCM186X_FORMATS,
+	 },
+	.ops = &pcm186x_dai_ops,
+};
+
+static struct snd_soc_dai_driver pcm1865_dai = {
+	.name = "pcm1865-aif",
+	.capture = {
+		 .stream_name = "Capture",
+		 .channels_min = 1,
+		 .channels_max = 4,
+		 .rates = PCM186X_RATES,
+		 .formats = PCM186X_FORMATS,
+	 },
+	.ops = &pcm186x_dai_ops,
+};
+
+static int pcm186x_power_on(struct snd_soc_codec *codec)
+{
+	struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+	int ret = 0;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies),
+				    priv->supplies);
+	if (ret)
+		return ret;
+
+	regcache_cache_only(priv->regmap, false);
+	ret = regcache_sync(priv->regmap);
+	if (ret) {
+		dev_err(codec->dev, "Failed to restore cache\n");
+		regcache_cache_only(priv->regmap, true);
+		regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
+				       priv->supplies);
+		return ret;
+	}
+
+	snd_soc_update_bits(codec, PCM186X_POWER_CTRL,
+			    PCM186X_PWR_CTRL_PWRDN, 0);
+
+	return 0;
+}
+
+static int pcm186x_power_off(struct snd_soc_codec *codec)
+{
+	struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+	int ret;
+
+	snd_soc_update_bits(codec, PCM186X_POWER_CTRL,
+			    PCM186X_PWR_CTRL_PWRDN, PCM186X_PWR_CTRL_PWRDN);
+
+	regcache_cache_only(priv->regmap, true);
+
+	ret = regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
+				     priv->supplies);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int pcm186x_set_bias_level(struct snd_soc_codec *codec,
+				  enum snd_soc_bias_level level)
+{
+	dev_dbg(codec->dev, "## %s: %d -> %d\n", __func__,
+		snd_soc_codec_get_bias_level(codec), level);
+
+	switch (level) {
+	case SND_SOC_BIAS_ON:
+		break;
+	case SND_SOC_BIAS_PREPARE:
+		break;
+	case SND_SOC_BIAS_STANDBY:
+		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF)
+			pcm186x_power_on(codec);
+		break;
+	case SND_SOC_BIAS_OFF:
+		pcm186x_power_off(codec);
+		break;
+	}
+
+	return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_pcm1863 = {
+	.set_bias_level = pcm186x_set_bias_level,
+
+	.component_driver = {
+		.controls = pcm1863_snd_controls,
+		.num_controls = ARRAY_SIZE(pcm1863_snd_controls),
+		.dapm_widgets = pcm1863_dapm_widgets,
+		.num_dapm_widgets = ARRAY_SIZE(pcm1863_dapm_widgets),
+		.dapm_routes = pcm1863_dapm_routes,
+		.num_dapm_routes = ARRAY_SIZE(pcm1863_dapm_routes),
+	},
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_pcm1865 = {
+	.set_bias_level = pcm186x_set_bias_level,
+	.suspend_bias_off = true,
+
+	.component_driver = {
+		.controls = pcm1865_snd_controls,
+		.num_controls = ARRAY_SIZE(pcm1865_snd_controls),
+		.dapm_widgets = pcm1865_dapm_widgets,
+		.num_dapm_widgets = ARRAY_SIZE(pcm1865_dapm_widgets),
+		.dapm_routes = pcm1865_dapm_routes,
+		.num_dapm_routes = ARRAY_SIZE(pcm1865_dapm_routes),
+	},
+};
+
+static bool pcm186x_volatile(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case PCM186X_PAGE:
+	case PCM186X_DEVICE_STATUS:
+	case PCM186X_FSAMPLE_STATUS:
+	case PCM186X_DIV_STATUS:
+	case PCM186X_CLK_STATUS:
+	case PCM186X_SUPPLY_STATUS:
+	case PCM186X_MMAP_STAT_CTRL:
+	case PCM186X_MMAP_ADDRESS:
+		return true;
+	}
+
+	return false;
+}
+
+static const struct regmap_range_cfg pcm186x_range = {
+	.name = "Pages",
+	.range_max = PCM186X_MAX_REGISTER,
+	.selector_reg = PCM186X_PAGE,
+	.selector_mask = 0xff,
+	.window_len = PCM186X_PAGE_LEN,
+};
+
+const struct regmap_config pcm186x_regmap = {
+	.reg_bits = 8,
+	.val_bits = 8,
+
+	.volatile_reg = pcm186x_volatile,
+
+	.ranges = &pcm186x_range,
+	.num_ranges = 1,
+
+	.max_register = PCM186X_MAX_REGISTER,
+
+	.cache_type = REGCACHE_RBTREE,
+};
+EXPORT_SYMBOL_GPL(pcm186x_regmap);
+
+int pcm186x_probe(struct device *dev, enum pcm186x_type type, int irq,
+		  struct regmap *regmap)
+{
+	struct pcm186x_priv *priv;
+	int i, ret;
+
+	priv = devm_kzalloc(dev, sizeof(struct pcm186x_priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	dev_set_drvdata(dev, priv);
+	priv->regmap = regmap;
+
+	for (i = 0; i < ARRAY_SIZE(priv->supplies); i++)
+		priv->supplies[i].supply = pcm186x_supply_names[i];
+
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(priv->supplies),
+				      priv->supplies);
+	if (ret) {
+		dev_err(dev, "failed to request supplies: %d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies),
+				    priv->supplies);
+	if (ret) {
+		dev_err(dev, "failed enable supplies: %d\n", ret);
+		return ret;
+	}
+
+	/* Reset device registers for a consistent power-on like state */
+	ret = regmap_write(regmap, PCM186X_PAGE, PCM186X_RESET);
+	if (ret) {
+		dev_err(dev, "failed to write device: %d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
+				     priv->supplies);
+	if (ret) {
+		dev_err(dev, "failed disable supplies: %d\n", ret);
+		return ret;
+	}
+
+	switch (type) {
+	case PCM1865:
+	case PCM1864:
+		ret = snd_soc_register_codec(dev, &soc_codec_dev_pcm1865,
+					     &pcm1865_dai, 1);
+		break;
+	case PCM1863:
+	case PCM1862:
+	default:
+		ret = snd_soc_register_codec(dev, &soc_codec_dev_pcm1863,
+					     &pcm1863_dai, 1);
+	}
+	if (ret) {
+		dev_err(dev, "failed to register CODEC: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pcm186x_probe);
+
+int pcm186x_remove(struct device *dev)
+{
+	snd_soc_unregister_codec(dev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pcm186x_remove);
+
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("PCM186x Universal Audio ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm186x.h b/sound/soc/codecs/pcm186x.h
new file mode 100644
index 0000000000000000000000000000000000000000..b630111bb3c4fb3f4d455676685f72cd75ae0289
--- /dev/null
+++ b/sound/soc/codecs/pcm186x.h
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments PCM186x Universal Audio ADC
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com
+ *	Andreas Dannenberg <dannenberg@ti.com>
+ *	Andrew F. Davis <afd@ti.com>
+ */
+
+#ifndef _PCM186X_H_
+#define _PCM186X_H_
+
+#include <linux/pm.h>
+#include <linux/regmap.h>
+
+enum pcm186x_type {
+	PCM1862,
+	PCM1863,
+	PCM1864,
+	PCM1865,
+};
+
+#define PCM186X_RATES	SNDRV_PCM_RATE_8000_192000
+#define PCM186X_FORMATS	(SNDRV_PCM_FMTBIT_S16_LE | \
+			 SNDRV_PCM_FMTBIT_S20_3LE |\
+			 SNDRV_PCM_FMTBIT_S24_LE | \
+			 SNDRV_PCM_FMTBIT_S32_LE)
+
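+/*
+ * Register addresses below are virtual: page * PCM186X_PAGE_LEN + offset.
+ * The paged regmap range configuration in pcm186x.c translates these into
+ * page-select writes plus the in-page register access.
+ */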
+#define PCM186X_PAGE_LEN		0x0100
+#define PCM186X_PAGE_BASE(n)		(PCM186X_PAGE_LEN * (n))
+
+/* The page selection register address is the same on all pages */
+#define PCM186X_PAGE			0
+
+/* Register Definitions - Page 0 */
+#define PCM186X_PGA_VAL_CH1_L		(PCM186X_PAGE_BASE(0) +   1)
+#define PCM186X_PGA_VAL_CH1_R		(PCM186X_PAGE_BASE(0) +   2)
+#define PCM186X_PGA_VAL_CH2_L		(PCM186X_PAGE_BASE(0) +   3)
+#define PCM186X_PGA_VAL_CH2_R		(PCM186X_PAGE_BASE(0) +   4)
+#define PCM186X_PGA_CTRL		(PCM186X_PAGE_BASE(0) +   5)
+#define PCM186X_ADC1_INPUT_SEL_L	(PCM186X_PAGE_BASE(0) +   6)
+#define PCM186X_ADC1_INPUT_SEL_R	(PCM186X_PAGE_BASE(0) +   7)
+#define PCM186X_ADC2_INPUT_SEL_L	(PCM186X_PAGE_BASE(0) +   8)
+#define PCM186X_ADC2_INPUT_SEL_R	(PCM186X_PAGE_BASE(0) +   9)
+#define PCM186X_AUXADC_INPUT_SEL	(PCM186X_PAGE_BASE(0) +  10)
+#define PCM186X_PCM_CFG			(PCM186X_PAGE_BASE(0) +  11)
+#define PCM186X_TDM_TX_SEL		(PCM186X_PAGE_BASE(0) +  12)
+#define PCM186X_TDM_TX_OFFSET		(PCM186X_PAGE_BASE(0) +  13)
+#define PCM186X_TDM_RX_OFFSET		(PCM186X_PAGE_BASE(0) +  14)
+#define PCM186X_DPGA_VAL_CH1_L		(PCM186X_PAGE_BASE(0) +  15)
+#define PCM186X_GPIO1_0_CTRL		(PCM186X_PAGE_BASE(0) +  16)
+#define PCM186X_GPIO3_2_CTRL		(PCM186X_PAGE_BASE(0) +  17)
+#define PCM186X_GPIO1_0_DIR_CTRL	(PCM186X_PAGE_BASE(0) +  18)
+#define PCM186X_GPIO3_2_DIR_CTRL	(PCM186X_PAGE_BASE(0) +  19)
+#define PCM186X_GPIO_IN_OUT		(PCM186X_PAGE_BASE(0) +  20)
+#define PCM186X_GPIO_PULL_CTRL		(PCM186X_PAGE_BASE(0) +  21)
+#define PCM186X_DPGA_VAL_CH1_R		(PCM186X_PAGE_BASE(0) +  22)
+#define PCM186X_DPGA_VAL_CH2_L		(PCM186X_PAGE_BASE(0) +  23)
+#define PCM186X_DPGA_VAL_CH2_R		(PCM186X_PAGE_BASE(0) +  24)
+#define PCM186X_DPGA_GAIN_CTRL		(PCM186X_PAGE_BASE(0) +  25)
+#define PCM186X_DPGA_MIC_CTRL		(PCM186X_PAGE_BASE(0) +  26)
+#define PCM186X_DIN_RESAMP_CTRL		(PCM186X_PAGE_BASE(0) +  27)
+#define PCM186X_CLK_CTRL		(PCM186X_PAGE_BASE(0) +  32)
+#define PCM186X_DSP1_CLK_DIV		(PCM186X_PAGE_BASE(0) +  33)
+#define PCM186X_DSP2_CLK_DIV		(PCM186X_PAGE_BASE(0) +  34)
+#define PCM186X_ADC_CLK_DIV		(PCM186X_PAGE_BASE(0) +  35)
+#define PCM186X_PLL_SCK_DIV		(PCM186X_PAGE_BASE(0) +  37)
+#define PCM186X_BCK_DIV			(PCM186X_PAGE_BASE(0) +  38)
+#define PCM186X_LRK_DIV			(PCM186X_PAGE_BASE(0) +  39)
+#define PCM186X_PLL_CTRL		(PCM186X_PAGE_BASE(0) +  40)
+#define PCM186X_PLL_P_DIV		(PCM186X_PAGE_BASE(0) +  41)
+#define PCM186X_PLL_R_DIV		(PCM186X_PAGE_BASE(0) +  42)
+#define PCM186X_PLL_J_DIV		(PCM186X_PAGE_BASE(0) +  43)
+#define PCM186X_PLL_D_DIV_LSB		(PCM186X_PAGE_BASE(0) +  44)
+#define PCM186X_PLL_D_DIV_MSB		(PCM186X_PAGE_BASE(0) +  45)
+#define PCM186X_SIGDET_MODE		(PCM186X_PAGE_BASE(0) +  48)
+#define PCM186X_SIGDET_MASK		(PCM186X_PAGE_BASE(0) +  49)
+#define PCM186X_SIGDET_STAT		(PCM186X_PAGE_BASE(0) +  50)
+#define PCM186X_SIGDET_LOSS_TIME	(PCM186X_PAGE_BASE(0) +  52)
+#define PCM186X_SIGDET_SCAN_TIME	(PCM186X_PAGE_BASE(0) +  53)
+#define PCM186X_SIGDET_INT_INTVL	(PCM186X_PAGE_BASE(0) +  54)
+#define PCM186X_SIGDET_DC_REF_CH1_L	(PCM186X_PAGE_BASE(0) +  64)
+#define PCM186X_SIGDET_DC_DIFF_CH1_L	(PCM186X_PAGE_BASE(0) +  65)
+#define PCM186X_SIGDET_DC_LEV_CH1_L	(PCM186X_PAGE_BASE(0) +  66)
+#define PCM186X_SIGDET_DC_REF_CH1_R	(PCM186X_PAGE_BASE(0) +  67)
+#define PCM186X_SIGDET_DC_DIFF_CH1_R	(PCM186X_PAGE_BASE(0) +  68)
+#define PCM186X_SIGDET_DC_LEV_CH1_R	(PCM186X_PAGE_BASE(0) +  69)
+#define PCM186X_SIGDET_DC_REF_CH2_L	(PCM186X_PAGE_BASE(0) +  70)
+#define PCM186X_SIGDET_DC_DIFF_CH2_L	(PCM186X_PAGE_BASE(0) +  71)
+#define PCM186X_SIGDET_DC_LEV_CH2_L	(PCM186X_PAGE_BASE(0) +  72)
+#define PCM186X_SIGDET_DC_REF_CH2_R	(PCM186X_PAGE_BASE(0) +  73)
+#define PCM186X_SIGDET_DC_DIFF_CH2_R	(PCM186X_PAGE_BASE(0) +  74)
+#define PCM186X_SIGDET_DC_LEV_CH2_R	(PCM186X_PAGE_BASE(0) +  75)
+#define PCM186X_SIGDET_DC_REF_CH3_L	(PCM186X_PAGE_BASE(0) +  76)
+#define PCM186X_SIGDET_DC_DIFF_CH3_L	(PCM186X_PAGE_BASE(0) +  77)
+#define PCM186X_SIGDET_DC_LEV_CH3_L	(PCM186X_PAGE_BASE(0) +  78)
+#define PCM186X_SIGDET_DC_REF_CH3_R	(PCM186X_PAGE_BASE(0) +  79)
+#define PCM186X_SIGDET_DC_DIFF_CH3_R	(PCM186X_PAGE_BASE(0) +  80)
+#define PCM186X_SIGDET_DC_LEV_CH3_R	(PCM186X_PAGE_BASE(0) +  81)
+#define PCM186X_SIGDET_DC_REF_CH4_L	(PCM186X_PAGE_BASE(0) +  82)
+#define PCM186X_SIGDET_DC_DIFF_CH4_L	(PCM186X_PAGE_BASE(0) +  83)
+#define PCM186X_SIGDET_DC_LEV_CH4_L	(PCM186X_PAGE_BASE(0) +  84)
+#define PCM186X_SIGDET_DC_REF_CH4_R	(PCM186X_PAGE_BASE(0) +  85)
+#define PCM186X_SIGDET_DC_DIFF_CH4_R	(PCM186X_PAGE_BASE(0) +  86)
+#define PCM186X_SIGDET_DC_LEV_CH4_R	(PCM186X_PAGE_BASE(0) +  87)
+#define PCM186X_AUXADC_DATA_CTRL	(PCM186X_PAGE_BASE(0) +  88)
+#define PCM186X_AUXADC_DATA_LSB		(PCM186X_PAGE_BASE(0) +  89)
+#define PCM186X_AUXADC_DATA_MSB		(PCM186X_PAGE_BASE(0) +  90)
+#define PCM186X_INT_ENABLE		(PCM186X_PAGE_BASE(0) +  96)
+#define PCM186X_INT_FLAG		(PCM186X_PAGE_BASE(0) +  97)
+#define PCM186X_INT_POL_WIDTH		(PCM186X_PAGE_BASE(0) +  98)
+#define PCM186X_POWER_CTRL		(PCM186X_PAGE_BASE(0) + 112)
+#define PCM186X_FILTER_MUTE_CTRL	(PCM186X_PAGE_BASE(0) + 113)
+#define PCM186X_DEVICE_STATUS		(PCM186X_PAGE_BASE(0) + 114)
+#define PCM186X_FSAMPLE_STATUS		(PCM186X_PAGE_BASE(0) + 115)
+#define PCM186X_DIV_STATUS		(PCM186X_PAGE_BASE(0) + 116)
+#define PCM186X_CLK_STATUS		(PCM186X_PAGE_BASE(0) + 117)
+#define PCM186X_SUPPLY_STATUS		(PCM186X_PAGE_BASE(0) + 120)
+
+/* Register Definitions - Page 1 */
+#define PCM186X_MMAP_STAT_CTRL		(PCM186X_PAGE_BASE(1) +   1)
+#define PCM186X_MMAP_ADDRESS		(PCM186X_PAGE_BASE(1) +   2)
+#define PCM186X_MEM_WDATA0		(PCM186X_PAGE_BASE(1) +   4)
+#define PCM186X_MEM_WDATA1		(PCM186X_PAGE_BASE(1) +   5)
+#define PCM186X_MEM_WDATA2		(PCM186X_PAGE_BASE(1) +   6)
+#define PCM186X_MEM_WDATA3		(PCM186X_PAGE_BASE(1) +   7)
+#define PCM186X_MEM_RDATA0		(PCM186X_PAGE_BASE(1) +   8)
+#define PCM186X_MEM_RDATA1		(PCM186X_PAGE_BASE(1) +   9)
+#define PCM186X_MEM_RDATA2		(PCM186X_PAGE_BASE(1) +  10)
+#define PCM186X_MEM_RDATA3		(PCM186X_PAGE_BASE(1) +  11)
+
+/* Register Definitions - Page 3 */
+#define PCM186X_OSC_PWR_DOWN_CTRL	(PCM186X_PAGE_BASE(3) +  18)
+#define PCM186X_MIC_BIAS_CTRL		(PCM186X_PAGE_BASE(3) +  21)
+
+/* Register Definitions - Page 253 */
+#define PCM186X_CURR_TRIM_CTRL		(PCM186X_PAGE_BASE(253) +  20)
+
+#define PCM186X_MAX_REGISTER		PCM186X_CURR_TRIM_CTRL
+
+/* PCM186X_PAGE */
+#define PCM186X_RESET			0xff
+
+/* PCM186X_ADCX_INPUT_SEL_X */
+#define PCM186X_ADC_INPUT_SEL_POL	BIT(7)
+#define PCM186X_ADC_INPUT_SEL_MASK	GENMASK(5, 0)
+
+/* PCM186X_PCM_CFG */
+#define PCM186X_PCM_CFG_RX_WLEN_MASK	GENMASK(7, 6)
+#define PCM186X_PCM_CFG_RX_WLEN_SHIFT	6
+#define PCM186X_PCM_CFG_RX_WLEN_32	0x00
+#define PCM186X_PCM_CFG_RX_WLEN_24	0x01
+#define PCM186X_PCM_CFG_RX_WLEN_20	0x02
+#define PCM186X_PCM_CFG_RX_WLEN_16	0x03
+#define PCM186X_PCM_CFG_TDM_LRCK_MODE	BIT(4)
+#define PCM186X_PCM_CFG_TX_WLEN_MASK	GENMASK(3, 2)
+#define PCM186X_PCM_CFG_TX_WLEN_SHIFT	2
+#define PCM186X_PCM_CFG_TX_WLEN_32	0x00
+#define PCM186X_PCM_CFG_TX_WLEN_24	0x01
+#define PCM186X_PCM_CFG_TX_WLEN_20	0x02
+#define PCM186X_PCM_CFG_TX_WLEN_16	0x03
+#define PCM186X_PCM_CFG_FMT_MASK	GENMASK(1, 0)
+#define PCM186X_PCM_CFG_FMT_SHIFT	0
+#define PCM186X_PCM_CFG_FMT_I2S		0x00
+#define PCM186X_PCM_CFG_FMT_LEFTJ	0x01
+#define PCM186X_PCM_CFG_FMT_RIGHTJ	0x02
+#define PCM186X_PCM_CFG_FMT_TDM		0x03
+
+/* PCM186X_TDM_TX_SEL */
+#define PCM186X_TDM_TX_SEL_2CH		0x00
+#define PCM186X_TDM_TX_SEL_4CH		0x01
+#define PCM186X_TDM_TX_SEL_6CH		0x02
+#define PCM186X_TDM_TX_SEL_MASK		0x03
+
+/* PCM186X_CLK_CTRL */
+#define PCM186X_CLK_CTRL_SCK_XI_SEL1	BIT(7)
+#define PCM186X_CLK_CTRL_SCK_XI_SEL0	BIT(6)
+#define PCM186X_CLK_CTRL_SCK_SRC_PLL	BIT(5)
+#define PCM186X_CLK_CTRL_MST_MODE	BIT(4)
+#define PCM186X_CLK_CTRL_ADC_SRC_PLL	BIT(3)
+#define PCM186X_CLK_CTRL_DSP2_SRC_PLL	BIT(2)
+#define PCM186X_CLK_CTRL_DSP1_SRC_PLL	BIT(1)
+#define PCM186X_CLK_CTRL_CLKDET_EN	BIT(0)
+
+/* PCM186X_PLL_CTRL */
+#define PCM186X_PLL_CTRL_LOCK		BIT(4)
+#define PCM186X_PLL_CTRL_REF_SEL	BIT(1)
+#define PCM186X_PLL_CTRL_EN		BIT(0)
+
+/* PCM186X_POWER_CTRL */
+#define PCM186X_PWR_CTRL_PWRDN		BIT(2)
+#define PCM186X_PWR_CTRL_SLEEP		BIT(1)
+#define PCM186X_PWR_CTRL_STBY		BIT(0)
+
+/* PCM186X_CLK_STATUS */
+#define PCM186X_CLK_STATUS_LRCKHLT	BIT(6)
+#define PCM186X_CLK_STATUS_BCKHLT	BIT(5)
+#define PCM186X_CLK_STATUS_SCKHLT	BIT(4)
+#define PCM186X_CLK_STATUS_LRCKERR	BIT(2)
+#define PCM186X_CLK_STATUS_BCKERR	BIT(1)
+#define PCM186X_CLK_STATUS_SCKERR	BIT(0)
+
+/* PCM186X_SUPPLY_STATUS */
+#define PCM186X_SUPPLY_STATUS_DVDD	BIT(2)
+#define PCM186X_SUPPLY_STATUS_AVDD	BIT(1)
+#define PCM186X_SUPPLY_STATUS_LDO	BIT(0)
+
+/* PCM186X_MMAP_STAT_CTRL */
+#define PCM186X_MMAP_STAT_DONE		BIT(4)
+#define PCM186X_MMAP_STAT_BUSY		BIT(2)
+#define PCM186X_MMAP_STAT_R_REQ		BIT(1)
+#define PCM186X_MMAP_STAT_W_REQ		BIT(0)
+
+extern const struct regmap_config pcm186x_regmap;
+
+int pcm186x_probe(struct device *dev, enum pcm186x_type type, int irq,
+		  struct regmap *regmap);
+int pcm186x_remove(struct device *dev);
+
+#endif /* _PCM186X_H_ */
diff --git a/sound/soc/codecs/pcm512x-spi.c b/sound/soc/codecs/pcm512x-spi.c
index 25c63510ae1553f698eee4f40c5b5ae8d211be27..7cdd2dc4fd793e895d9149c0c5023f97f7ce29e3 100644
--- a/sound/soc/codecs/pcm512x-spi.c
+++ b/sound/soc/codecs/pcm512x-spi.c
@@ -70,3 +70,7 @@ static struct spi_driver pcm512x_spi_driver = {
 };
 
 module_spi_driver(pcm512x_spi_driver);
+
+MODULE_DESCRIPTION("ASoC PCM512x codec driver - SPI");
+MODULE_AUTHOR("Mark Brown <broonie@kernel.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/rl6231.c b/sound/soc/codecs/rl6231.c
index 974a9040651db37da519ccb1e2cf5e2ff8d55ac2..7ef3b5476bcc2ecc3f291d151e99ce534230183e 100644
--- a/sound/soc/codecs/rl6231.c
+++ b/sound/soc/codecs/rl6231.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/regmap.h>
 
+#include <linux/gcd.h>
 #include "rl6231.h"
 
 /**
@@ -106,6 +107,25 @@ static const struct pll_calc_map pll_preset_table[] = {
 	{19200000,  24576000,  3, 30, 3, false},
 };
 
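+/*
+ * Find the smallest divider d >= ceil(in / max) that also divides 'div'
+ * (the gcd of the input and output frequencies in the caller), so both
+ * frequencies can be scaled down exactly while fitting within 'max'.
+ */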
+static unsigned int find_best_div(unsigned int in,
+	unsigned int max, unsigned int div)
+{
+	unsigned int d;
+
+	if (in <= max)
+		return 1;
+
+	d = in / max;
+	if (in % max)
+		d++;
+
+	while (div % d != 0)
+		d++;
+
+	return d;
+}
+
 /**
  * rl6231_pll_calc - Calcualte PLL M/N/K code.
  * @freq_in: external clock provided to codec.
@@ -120,9 +140,11 @@ int rl6231_pll_calc(const unsigned int freq_in,
 	const unsigned int freq_out, struct rl6231_pll_code *pll_code)
 {
 	int max_n = RL6231_PLL_N_MAX, max_m = RL6231_PLL_M_MAX;
-	int i, k, red, n_t, pll_out, in_t, out_t;
-	int n = 0, m = 0, m_t = 0;
-	int red_t = abs(freq_out - freq_in);
+	int i, k, n_t;
+	int k_t, min_k, max_k, n = 0, m = 0, m_t = 0;
+	unsigned int red, pll_out, in_t, out_t, div, div_t;
+	unsigned int red_t = abs(freq_out - freq_in);
+	unsigned int f_in, f_out, f_max;
 	bool bypass = false;
 
 	if (RL6231_PLL_INP_MAX < freq_in || RL6231_PLL_INP_MIN > freq_in)
@@ -140,39 +162,52 @@ int rl6231_pll_calc(const unsigned int freq_in,
 		}
 	}
 
-	k = 100000000 / freq_out - 2;
-	if (k > RL6231_PLL_K_MAX)
-		k = RL6231_PLL_K_MAX;
-	for (n_t = 0; n_t <= max_n; n_t++) {
-		in_t = freq_in / (k + 2);
-		pll_out = freq_out / (n_t + 2);
-		if (in_t < 0)
-			continue;
-		if (in_t == pll_out) {
-			bypass = true;
-			n = n_t;
-			goto code_find;
-		}
-		red = abs(in_t - pll_out);
-		if (red < red_t) {
-			bypass = true;
-			n = n_t;
-			m = m_t;
-			if (red == 0)
+	min_k = 80000000 / freq_out - 2;
+	max_k = 150000000 / freq_out - 2;
+	if (max_k > RL6231_PLL_K_MAX)
+		max_k = RL6231_PLL_K_MAX;
+	if (min_k > RL6231_PLL_K_MAX)
+		min_k = max_k = RL6231_PLL_K_MAX;
+	div_t = gcd(freq_in, freq_out);
+	f_max = 0xffffffff / RL6231_PLL_N_MAX;
+	div = find_best_div(freq_in, f_max, div_t);
+	f_in = freq_in / div;
+	f_out = freq_out / div;
+	k = min_k;
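+	/*
+	 * Search N/M/K so that f_in * (N + 2) / ((M + 2) * (K + 2)) comes as
+	 * close as possible to f_out; the bypass case skips the M divider.
+	 */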
+	for (k_t = min_k; k_t <= max_k; k_t++) {
+		for (n_t = 0; n_t <= max_n; n_t++) {
+			in_t = f_in * (n_t + 2);
+			pll_out = f_out * (k_t + 2);
+			if (in_t == pll_out) {
+				bypass = true;
+				n = n_t;
+				k = k_t;
 				goto code_find;
-			red_t = red;
-		}
-		for (m_t = 0; m_t <= max_m; m_t++) {
-			out_t = in_t / (m_t + 2);
-			red = abs(out_t - pll_out);
+			}
+			out_t = in_t / (k_t + 2);
+			red = abs(f_out - out_t);
 			if (red < red_t) {
-				bypass = false;
+				bypass = true;
 				n = n_t;
-				m = m_t;
+				m = 0;
+				k = k_t;
 				if (red == 0)
 					goto code_find;
 				red_t = red;
 			}
+			for (m_t = 0; m_t <= max_m; m_t++) {
+				out_t = in_t / ((m_t + 2) * (k_t + 2));
+				red = abs(f_out - out_t);
+				if (red < red_t) {
+					bypass = false;
+					n = n_t;
+					m = m_t;
+					k = k_t;
+					if (red == 0)
+						goto code_find;
+					red_t = red;
+				}
+			}
 		}
 	}
 	pr_debug("Only get approximation about PLL\n");
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index 64bf26cec20d535314551ae542f8c28ed25352dc..2144edca97b0959430be176feeec47ad0c6f265b 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -381,6 +381,7 @@ int rt5514_spi_burst_read(unsigned int addr, u8 *rxbuf, size_t len)
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(rt5514_spi_burst_read);
 
 /**
  * rt5514_spi_burst_write - Write data to SPI by rt5514 address.
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index 2dd6e9f990a4c4c7a2377c5f159987a046220b7a..198df016802f826dff3681d5f6496c4484d3e431 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -295,6 +295,33 @@ static int rt5514_dsp_voice_wake_up_get(struct snd_kcontrol *kcontrol,
 	return 0;
 }
 
+static int rt5514_calibration(struct rt5514_priv *rt5514, bool on)
+{
+	if (on) {
+		regmap_write(rt5514->regmap, RT5514_ANA_CTRL_PLL3, 0x0000000a);
+		regmap_update_bits(rt5514->regmap, RT5514_PLL_SOURCE_CTRL, 0xf,
+			0xa);
+		regmap_update_bits(rt5514->regmap, RT5514_PWR_ANA1, 0x301,
+			0x301);
+		regmap_write(rt5514->regmap, RT5514_PLL3_CALIB_CTRL4,
+			0x80000000 | rt5514->pll3_cal_value);
+		regmap_write(rt5514->regmap, RT5514_PLL3_CALIB_CTRL1,
+			0x8bb80800);
+		regmap_update_bits(rt5514->regmap, RT5514_PLL3_CALIB_CTRL5,
+			0xc0000000, 0x80000000);
+		regmap_update_bits(rt5514->regmap, RT5514_PLL3_CALIB_CTRL5,
+			0xc0000000, 0xc0000000);
+	} else {
+		regmap_update_bits(rt5514->regmap, RT5514_PLL3_CALIB_CTRL5,
+			0xc0000000, 0x40000000);
+		regmap_update_bits(rt5514->regmap, RT5514_PWR_ANA1, 0x301, 0);
+		regmap_update_bits(rt5514->regmap, RT5514_PLL_SOURCE_CTRL, 0xf,
+			0x4);
+	}
+
+	return 0;
+}
+
 static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
 		struct snd_ctl_elem_value *ucontrol)
 {
@@ -302,6 +329,7 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
 	struct rt5514_priv *rt5514 = snd_soc_component_get_drvdata(component);
 	struct snd_soc_codec *codec = rt5514->codec;
 	const struct firmware *fw = NULL;
+	u8 buf[8] = {0};
 
 	if (ucontrol->value.integer.value[0] == rt5514->dsp_enabled)
 		return 0;
@@ -310,6 +338,35 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
 		rt5514->dsp_enabled = ucontrol->value.integer.value[0];
 
 		if (rt5514->dsp_enabled) {
+			if (rt5514->pdata.dsp_calib_clk_name &&
+				!IS_ERR(rt5514->dsp_calib_clk)) {
+				if (clk_set_rate(rt5514->dsp_calib_clk,
+					rt5514->pdata.dsp_calib_clk_rate))
+					dev_err(codec->dev,
+						"Can't set rate for dsp_calib_clk\n");
+
+				if (clk_prepare_enable(rt5514->dsp_calib_clk))
+					dev_err(codec->dev,
+						"Can't enable dsp_calib_clk\n");
+
+				rt5514_calibration(rt5514, true);
+
+				msleep(20);
+#if IS_ENABLED(CONFIG_SND_SOC_RT5514_SPI)
+				rt5514_spi_burst_read(RT5514_PLL3_CALIB_CTRL6 |
+					RT5514_DSP_MAPPING,
+					(u8 *)&buf, sizeof(buf));
+#else
+				dev_err(codec->dev,
+					"There is no SPI driver for loading the firmware\n");
+#endif
+				rt5514->pll3_cal_value = buf[0] | buf[1] << 8 |
+					buf[2] << 16 | buf[3] << 24;
+
+				rt5514_calibration(rt5514, false);
+				clk_disable_unprepare(rt5514->dsp_calib_clk);
+			}
+
 			rt5514_enable_dsp_prepare(rt5514);
 
 			request_firmware(&fw, RT5514_FIRMWARE1, codec->dev);
@@ -341,6 +398,20 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
 			/* DSP run */
 			regmap_write(rt5514->i2c_regmap, 0x18002f00,
 				0x00055148);
+
+			if (rt5514->pdata.dsp_calib_clk_name &&
+				!IS_ERR(rt5514->dsp_calib_clk)) {
+				msleep(20);
+
+				regmap_write(rt5514->i2c_regmap, 0x1800211c,
+					rt5514->pll3_cal_value);
+				regmap_write(rt5514->i2c_regmap, 0x18002124,
+					0x00220012);
+				regmap_write(rt5514->i2c_regmap, 0x18002124,
+					0x80220042);
+				regmap_write(rt5514->i2c_regmap, 0x18002124,
+					0xe0220042);
+			}
 		} else {
 			regmap_multi_reg_write(rt5514->i2c_regmap,
 				rt5514_i2c_patch, ARRAY_SIZE(rt5514_i2c_patch));
@@ -1024,12 +1095,22 @@ static int rt5514_set_bias_level(struct snd_soc_codec *codec,
 static int rt5514_probe(struct snd_soc_codec *codec)
 {
 	struct rt5514_priv *rt5514 = snd_soc_codec_get_drvdata(codec);
+	struct platform_device *pdev = container_of(codec->dev,
+						   struct platform_device, dev);
 
 	rt5514->mclk = devm_clk_get(codec->dev, "mclk");
 	if (PTR_ERR(rt5514->mclk) == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
 
+	if (rt5514->pdata.dsp_calib_clk_name) {
+		rt5514->dsp_calib_clk = devm_clk_get(&pdev->dev,
+				rt5514->pdata.dsp_calib_clk_name);
+		if (PTR_ERR(rt5514->dsp_calib_clk) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+	}
+
 	rt5514->codec = codec;
+	rt5514->pll3_cal_value = 0x0078b000;
 
 	return 0;
 }
@@ -1147,6 +1228,10 @@ static int rt5514_parse_dp(struct rt5514_priv *rt5514, struct device *dev)
 {
 	device_property_read_u32(dev, "realtek,dmic-init-delay-ms",
 		&rt5514->pdata.dmic_init_delay);
+	device_property_read_string(dev, "realtek,dsp-calib-clk-name",
+		&rt5514->pdata.dsp_calib_clk_name);
+	device_property_read_u32(dev, "realtek,dsp-calib-clk-rate",
+		&rt5514->pdata.dsp_calib_clk_rate);
 
 	return 0;
 }
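
The calibration added above is self-contained: enable the optional reference clock named by "realtek,dsp-calib-clk-name", put PLL3 into calibration mode, read the result back through the SPI path, then tear the path down again; the cached value is later written into the DSP (register 0x1800211c) once the firmware is running. A rough consolidation of that sequence into one helper, as it would sit in rt5514.c, is sketched below for illustration only, reusing the handles set up in rt5514_probe():

static int rt5514_run_pll3_calibration(struct rt5514_priv *rt5514,
				       struct device *dev)
{
	u8 buf[8] = {0};

	if (clk_set_rate(rt5514->dsp_calib_clk,
			 rt5514->pdata.dsp_calib_clk_rate))
		dev_err(dev, "Can't set rate for dsp_calib_clk\n");
	if (clk_prepare_enable(rt5514->dsp_calib_clk))
		dev_err(dev, "Can't enable dsp_calib_clk\n");

	rt5514_calibration(rt5514, true);	/* put PLL3 in calibration mode */
	msleep(20);				/* let the calibration settle */

	/* the real code guards this with IS_ENABLED(CONFIG_SND_SOC_RT5514_SPI) */
	rt5514_spi_burst_read(RT5514_PLL3_CALIB_CTRL6 | RT5514_DSP_MAPPING,
			      buf, sizeof(buf));
	rt5514->pll3_cal_value = buf[0] | buf[1] << 8 |
				 buf[2] << 16 | buf[3] << 24;

	rt5514_calibration(rt5514, false);	/* back to normal operation */
	clk_disable_unprepare(rt5514->dsp_calib_clk);

	return 0;
}
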
diff --git a/sound/soc/codecs/rt5514.h b/sound/soc/codecs/rt5514.h
index 2dc40e6d8b3f69835ded69ac71012cb765ac0a36..f0f3400ce6b15174dc5037274f372c615f8fa3d5 100644
--- a/sound/soc/codecs/rt5514.h
+++ b/sound/soc/codecs/rt5514.h
@@ -34,7 +34,9 @@
 #define RT5514_CLK_CTRL1			0x2104
 #define RT5514_CLK_CTRL2			0x2108
 #define RT5514_PLL3_CALIB_CTRL1			0x2110
+#define RT5514_PLL3_CALIB_CTRL4			0x2120
 #define RT5514_PLL3_CALIB_CTRL5			0x2124
+#define RT5514_PLL3_CALIB_CTRL6			0x2128
 #define RT5514_DELAY_BUF_CTRL1			0x2140
 #define RT5514_DELAY_BUF_CTRL3			0x2148
 #define RT5514_ASRC_IN_CTRL1			0x2180
@@ -272,7 +274,7 @@ struct rt5514_priv {
 	struct rt5514_platform_data pdata;
 	struct snd_soc_codec *codec;
 	struct regmap *i2c_regmap, *regmap;
-	struct clk *mclk;
+	struct clk *mclk, *dsp_calib_clk;
 	int sysclk;
 	int sysclk_src;
 	int lrck;
@@ -281,6 +283,7 @@ struct rt5514_priv {
 	int pll_in;
 	int pll_out;
 	int dsp_enabled;
+	unsigned int pll3_cal_value;
 };
 
 #endif /* __RT5514_H__ */
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index edc152c8a1fe7596e9bbc5760574d973a9185d16..789346cb30b9320f47d35d60fb63364216146bf4 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -1943,6 +1943,56 @@ static int rt5650_hp_event(struct snd_soc_dapm_widget *w,
 	return 0;
 }
 
+static int rt5645_set_micbias1_event(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *k, int  event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, RT5645_GEN_CTRL2,
+			RT5645_MICBIAS1_POW_CTRL_SEL_MASK,
+			RT5645_MICBIAS1_POW_CTRL_SEL_M);
+		break;
+
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, RT5645_GEN_CTRL2,
+			RT5645_MICBIAS1_POW_CTRL_SEL_MASK,
+			RT5645_MICBIAS1_POW_CTRL_SEL_A);
+		break;
+
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static int rt5645_set_micbias2_event(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *k, int  event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, RT5645_GEN_CTRL2,
+			RT5645_MICBIAS2_POW_CTRL_SEL_MASK,
+			RT5645_MICBIAS2_POW_CTRL_SEL_M);
+		break;
+
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, RT5645_GEN_CTRL2,
+			RT5645_MICBIAS2_POW_CTRL_SEL_MASK,
+			RT5645_MICBIAS2_POW_CTRL_SEL_A);
+		break;
+
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
 static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = {
 	SND_SOC_DAPM_SUPPLY("LDO2", RT5645_PWR_MIXER,
 		RT5645_PWR_LDO2_BIT, 0, NULL, 0),
@@ -1980,10 +2030,12 @@ static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = {
 
 	/* Input Side */
 	/* micbias */
-	SND_SOC_DAPM_MICBIAS("micbias1", RT5645_PWR_ANLG2,
-			RT5645_PWR_MB1_BIT, 0),
-	SND_SOC_DAPM_MICBIAS("micbias2", RT5645_PWR_ANLG2,
-			RT5645_PWR_MB2_BIT, 0),
+	SND_SOC_DAPM_SUPPLY("micbias1", RT5645_PWR_ANLG2,
+			RT5645_PWR_MB1_BIT, 0, rt5645_set_micbias1_event,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("micbias2", RT5645_PWR_ANLG2,
+			RT5645_PWR_MB2_BIT, 0, rt5645_set_micbias2_event,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 	/* Input Lines */
 	SND_SOC_DAPM_INPUT("DMIC L1"),
 	SND_SOC_DAPM_INPUT("DMIC R1"),
@@ -3394,6 +3446,9 @@ static int rt5645_probe(struct snd_soc_codec *codec)
 		snd_soc_dapm_sync(dapm);
 	}
 
+	if (rt5645->pdata.long_name)
+		codec->component.card->long_name = rt5645->pdata.long_name;
+
 	rt5645->eq_param = devm_kzalloc(codec->dev,
 		RT5645_HWEQ_NUM * sizeof(struct rt5645_eq_param_s), GFP_KERNEL);
 
@@ -3570,63 +3625,74 @@ static const struct acpi_device_id rt5645_acpi_match[] = {
 MODULE_DEVICE_TABLE(acpi, rt5645_acpi_match);
 #endif
 
-static const struct rt5645_platform_data general_platform_data = {
+static const struct rt5645_platform_data intel_braswell_platform_data = {
 	.dmic1_data_pin = RT5645_DMIC1_DISABLE,
 	.dmic2_data_pin = RT5645_DMIC_DATA_IN2P,
 	.jd_mode = 3,
 };
 
-static const struct dmi_system_id dmi_platform_intel_braswell[] = {
+static const struct rt5645_platform_data buddy_platform_data = {
+	.dmic1_data_pin = RT5645_DMIC_DATA_GPIO5,
+	.dmic2_data_pin = RT5645_DMIC_DATA_IN2P,
+	.jd_mode = 3,
+	.level_trigger_irq = true,
+};
+
+static const struct rt5645_platform_data gpd_win_platform_data = {
+	.jd_mode = 3,
+	.inv_jd1_1 = true,
+	.long_name = "gpd-win-pocket-rt5645",
+	/* The GPD Pocket has a differential mic; the GPD Win does not care. */
+	.in2_diff = true,
+};
+
+static const struct rt5645_platform_data asus_t100ha_platform_data = {
+	.dmic1_data_pin = RT5645_DMIC_DATA_IN2N,
+	.dmic2_data_pin = RT5645_DMIC2_DISABLE,
+	.jd_mode = 3,
+	.inv_jd1_1 = true,
+};
+
+static const struct rt5645_platform_data jd_mode3_platform_data = {
+	.jd_mode = 3,
+};
+
+static const struct dmi_system_id dmi_platform_data[] = {
+	{
+		.ident = "Chrome Buddy",
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Buddy"),
+		},
+		.driver_data = (void *)&buddy_platform_data,
+	},
 	{
 		.ident = "Intel Strago",
 		.matches = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Strago"),
 		},
+		.driver_data = (void *)&intel_braswell_platform_data,
 	},
 	{
 		.ident = "Google Chrome",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
 		},
+		.driver_data = (void *)&intel_braswell_platform_data,
 	},
 	{
 		.ident = "Google Setzer",
 		.matches = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
 		},
+		.driver_data = (void *)&intel_braswell_platform_data,
 	},
 	{
 		.ident = "Microsoft Surface 3",
 		.matches = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
 		},
+		.driver_data = (void *)&intel_braswell_platform_data,
 	},
-	{ }
-};
-
-static const struct rt5645_platform_data buddy_platform_data = {
-	.dmic1_data_pin = RT5645_DMIC_DATA_GPIO5,
-	.dmic2_data_pin = RT5645_DMIC_DATA_IN2P,
-	.jd_mode = 3,
-	.level_trigger_irq = true,
-};
-
-static const struct dmi_system_id dmi_platform_intel_broadwell[] = {
-	{
-		.ident = "Chrome Buddy",
-		.matches = {
-			DMI_MATCH(DMI_PRODUCT_NAME, "Buddy"),
-		},
-	},
-	{ }
-};
-
-static const struct rt5645_platform_data gpd_win_platform_data = {
-	.jd_mode = 3,
-	.inv_jd1_1 = true,
-};
-
-static const struct dmi_system_id dmi_platform_gpd_win[] = {
 	{
 		/*
 		 * Match for the GPDwin which unfortunately uses somewhat
@@ -3637,61 +3703,42 @@ static const struct dmi_system_id dmi_platform_gpd_win[] = {
 		 * the same default product_name. Also the GPDwin is the
 		 * only device to have both board_ and product_name not set.
 		 */
-		.ident = "GPD Win",
+		.ident = "GPD Win / Pocket",
 		.matches = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
 			DMI_MATCH(DMI_BOARD_NAME, "Default string"),
 			DMI_MATCH(DMI_BOARD_SERIAL, "Default string"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
 		},
+		.driver_data = (void *)&gpd_win_platform_data,
 	},
-	{}
-};
-
-static const struct rt5645_platform_data general_platform_data2 = {
-	.dmic1_data_pin = RT5645_DMIC_DATA_IN2N,
-	.dmic2_data_pin = RT5645_DMIC2_DISABLE,
-	.jd_mode = 3,
-	.inv_jd1_1 = true,
-};
-
-static const struct dmi_system_id dmi_platform_asus_t100ha[] = {
 	{
 		.ident = "ASUS T100HAN",
 		.matches = {
 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
 			DMI_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
 		},
+		.driver_data = (void *)&asus_t100ha_platform_data,
 	},
-	{ }
-};
-
-static const struct rt5645_platform_data minix_z83_4_platform_data = {
-	.jd_mode = 3,
-};
-
-static const struct dmi_system_id dmi_platform_minix_z83_4[] = {
 	{
 		.ident = "MINIX Z83-4",
 		.matches = {
 			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MINIX"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
 		},
+		.driver_data = (void *)&jd_mode3_platform_data,
+	},
+	{
+		.ident = "Teclast X80 Pro",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "X80 Pro"),
+		},
+		.driver_data = (void *)&jd_mode3_platform_data,
 	},
 	{ }
 };
 
-static bool rt5645_check_dp(struct device *dev)
-{
-	if (device_property_present(dev, "realtek,in2-differential") ||
-		device_property_present(dev, "realtek,dmic1-data-pin") ||
-		device_property_present(dev, "realtek,dmic2-data-pin") ||
-		device_property_present(dev, "realtek,jd-mode"))
-		return true;
-
-	return false;
-}
-
 static int rt5645_parse_dt(struct rt5645_priv *rt5645, struct device *dev)
 {
 	rt5645->pdata.in2_diff = device_property_read_bool(dev,
@@ -3710,6 +3757,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
 		    const struct i2c_device_id *id)
 {
 	struct rt5645_platform_data *pdata = dev_get_platdata(&i2c->dev);
+	const struct dmi_system_id *dmi_data;
 	struct rt5645_priv *rt5645;
 	int ret, i;
 	unsigned int val;
@@ -3723,20 +3771,16 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
 	rt5645->i2c = i2c;
 	i2c_set_clientdata(i2c, rt5645);
 
+	dmi_data = dmi_first_match(dmi_platform_data);
+	if (dmi_data) {
+		dev_info(&i2c->dev, "Detected %s platform\n", dmi_data->ident);
+		pdata = dmi_data->driver_data;
+	}
+
 	if (pdata)
 		rt5645->pdata = *pdata;
-	else if (dmi_check_system(dmi_platform_intel_broadwell))
-		rt5645->pdata = buddy_platform_data;
-	else if (rt5645_check_dp(&i2c->dev))
+	else
 		rt5645_parse_dt(rt5645, &i2c->dev);
-	else if (dmi_check_system(dmi_platform_intel_braswell))
-		rt5645->pdata = general_platform_data;
-	else if (dmi_check_system(dmi_platform_gpd_win))
-		rt5645->pdata = gpd_win_platform_data;
-	else if (dmi_check_system(dmi_platform_asus_t100ha))
-		rt5645->pdata = general_platform_data2;
-	else if (dmi_check_system(dmi_platform_minix_z83_4))
-		rt5645->pdata = minix_z83_4_platform_data;
 
 	if (quirk != -1) {
 		rt5645->pdata.in2_diff = QUIRK_IN2_DIFF(quirk);
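
All of the per-board quirks are now carried by a single DMI table: dmi_first_match() returns the first matching entry, and its ->driver_data points at the platform data to copy, replacing the earlier chain of dmi_check_system() calls and one-entry tables. A minimal sketch of the same pattern with hypothetical names (my_pdata, my_apply_quirks), for illustration only:

#include <linux/device.h>
#include <linux/dmi.h>

struct my_pdata {
	int jd_mode;
};

static const struct my_pdata example_pdata = {
	.jd_mode = 3,
};

static const struct dmi_system_id my_dmi_table[] = {
	{
		.ident = "Example Board",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Board-1"),
		},
		.driver_data = (void *)&example_pdata,
	},
	{ }
};

static void my_apply_quirks(struct device *dev, struct my_pdata *pdata)
{
	const struct dmi_system_id *id = dmi_first_match(my_dmi_table);

	if (id) {
		dev_info(dev, "Detected %s\n", id->ident);
		*pdata = *(const struct my_pdata *)id->driver_data;
	}
}
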
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index cfc5f97549eb656b9b861a612e0d60b40c2e93be..940325b28c299daa7d3b2b4dafda5e059a85b679 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -2117,6 +2117,12 @@ enum {
 #define RT5645_RXDC_SRC_STO			(0x0 << 7)
 #define RT5645_RXDC_SRC_MONO			(0x1 << 7)
 #define RT5645_RXDC_SRC_SFT			(7)
+#define RT5645_MICBIAS1_POW_CTRL_SEL_MASK	(0x1 << 5)
+#define RT5645_MICBIAS1_POW_CTRL_SEL_A		(0x0 << 5)
+#define RT5645_MICBIAS1_POW_CTRL_SEL_M		(0x1 << 5)
+#define RT5645_MICBIAS2_POW_CTRL_SEL_MASK	(0x1 << 4)
+#define RT5645_MICBIAS2_POW_CTRL_SEL_A		(0x0 << 4)
+#define RT5645_MICBIAS2_POW_CTRL_SEL_M		(0x1 << 4)
 #define RT5645_RXDP2_SEL_MASK			(0x1 << 3)
 #define RT5645_RXDP2_SEL_IF2			(0x0 << 3)
 #define RT5645_RXDP2_SEL_ADC			(0x1 << 3)
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c
index 354dc0d64f11fbd29f59d324c8fd4fb9449acaee..7b91ee267b4eb24347e90c72894fb1daee3b6467 100644
--- a/sound/soc/codecs/si476x.c
+++ b/sound/soc/codecs/si476x.c
@@ -231,14 +231,17 @@ static struct snd_soc_dai_driver si476x_dai = {
 	.ops		= &si476x_dai_ops,
 };
 
-static struct regmap *si476x_get_regmap(struct device *dev)
+static int si476x_probe(struct snd_soc_component *component)
 {
-	return dev_get_regmap(dev->parent, NULL);
+	snd_soc_component_init_regmap(component,
+				dev_get_regmap(component->dev->parent, NULL));
+
+	return 0;
 }
 
 static const struct snd_soc_codec_driver soc_codec_dev_si476x = {
-	.get_regmap = si476x_get_regmap,
 	.component_driver = {
+		.probe			= si476x_probe,
 		.dapm_widgets		= si476x_dapm_widgets,
 		.num_dapm_widgets	= ARRAY_SIZE(si476x_dapm_widgets),
 		.dapm_routes		= si476x_dapm_routes,
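
With the get_regmap() callback removed, the regmap is attached from the component probe callback instead: snd_soc_component_init_regmap() simply hands an externally created regmap (here the MFD parent's) to the ASoC component. A generic sketch of that conversion for an MFD child codec, with an extra NULL check added for illustration:

static int my_mfd_child_probe(struct snd_soc_component *component)
{
	struct regmap *regmap;

	regmap = dev_get_regmap(component->dev->parent, NULL);
	if (!regmap)
		return -ENODEV;

	snd_soc_component_init_regmap(component, regmap);

	return 0;
}
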
diff --git a/sound/soc/codecs/spdif_receiver.c b/sound/soc/codecs/spdif_receiver.c
index 7acd05140a810c53065186ce6aa6dfa22235c6f7..c8fd6367f6c00976e73108dfebcb41ad6cbafba7 100644
--- a/sound/soc/codecs/spdif_receiver.c
+++ b/sound/soc/codecs/spdif_receiver.c
@@ -34,10 +34,11 @@ static const struct snd_soc_dapm_route dir_routes[] = {
 #define STUB_RATES	SNDRV_PCM_RATE_8000_192000
 #define STUB_FORMATS	(SNDRV_PCM_FMTBIT_S16_LE | \
 			SNDRV_PCM_FMTBIT_S20_3LE | \
-			SNDRV_PCM_FMTBIT_S24_LE | \
+			SNDRV_PCM_FMTBIT_S24_LE | \
+			SNDRV_PCM_FMTBIT_S32_LE | \
 			SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
 
-static const struct snd_soc_codec_driver soc_codec_spdif_dir = {
+static struct snd_soc_codec_driver soc_codec_spdif_dir = {
 	.component_driver = {
 		.dapm_widgets		= dir_widgets,
 		.num_dapm_widgets	= ARRAY_SIZE(dir_widgets),
diff --git a/sound/soc/codecs/spdif_transmitter.c b/sound/soc/codecs/spdif_transmitter.c
index 063a64ff82d3ed9c764f917b22f22ff291d3c3d1..037aa1d4555921e70b403a3d27706464d72b7633 100644
--- a/sound/soc/codecs/spdif_transmitter.c
+++ b/sound/soc/codecs/spdif_transmitter.c
@@ -27,7 +27,8 @@
 #define STUB_RATES	SNDRV_PCM_RATE_8000_192000
 #define STUB_FORMATS	(SNDRV_PCM_FMTBIT_S16_LE | \
 			SNDRV_PCM_FMTBIT_S20_3LE | \
-			SNDRV_PCM_FMTBIT_S24_LE)
+			SNDRV_PCM_FMTBIT_S24_LE | \
+			SNDRV_PCM_FMTBIT_S32_LE)
 
 static const struct snd_soc_dapm_widget dit_widgets[] = {
 	SND_SOC_DAPM_OUTPUT("spdif-out"),
@@ -37,7 +38,7 @@ static const struct snd_soc_dapm_route dit_routes[] = {
 	{ "spdif-out", NULL, "Playback" },
 };
 
-static const struct snd_soc_codec_driver soc_codec_spdif_dit = {
+static struct snd_soc_codec_driver soc_codec_spdif_dit = {
 	.component_driver = {
 		.dapm_widgets		= dit_widgets,
 		.num_dapm_widgets	= ARRAY_SIZE(dit_widgets),
diff --git a/sound/soc/codecs/tas5720.c b/sound/soc/codecs/tas5720.c
index a736a2a6976c2fb6eefd2994c413ea35159b3724..f3006f301fe8c7cf0cd10b364c7159a07238e598 100644
--- a/sound/soc/codecs/tas5720.c
+++ b/sound/soc/codecs/tas5720.c
@@ -36,6 +36,11 @@
 /* Define how often to check (and clear) the fault status register (in ms) */
 #define TAS5720_FAULT_CHECK_INTERVAL		200
 
+enum tas572x_type {
+	TAS5720,
+	TAS5722,
+};
+
 static const char * const tas5720_supply_names[] = {
 	"dvdd",		/* Digital power supply. Connect to 3.3-V supply. */
 	"pvdd",		/* Class-D amp and analog power supply (connected). */
@@ -47,6 +52,7 @@ struct tas5720_data {
 	struct snd_soc_codec *codec;
 	struct regmap *regmap;
 	struct i2c_client *tas5720_client;
+	enum tas572x_type devtype;
 	struct regulator_bulk_data supplies[TAS5720_NUM_SUPPLIES];
 	struct delayed_work fault_check_work;
 	unsigned int last_fault;
@@ -264,7 +270,7 @@ out:
 static int tas5720_codec_probe(struct snd_soc_codec *codec)
 {
 	struct tas5720_data *tas5720 = snd_soc_codec_get_drvdata(codec);
-	unsigned int device_id;
+	unsigned int device_id, expected_device_id;
 	int ret;
 
 	tas5720->codec = codec;
@@ -276,6 +282,11 @@ static int tas5720_codec_probe(struct snd_soc_codec *codec)
 		return ret;
 	}
 
+	/*
+	 * Take a liberal approach to checking the device ID so that the
+	 * driver can still be used even if the device ID does not match;
+	 * just warn if there is a mismatch.
+	 */
 	ret = regmap_read(tas5720->regmap, TAS5720_DEVICE_ID_REG, &device_id);
 	if (ret < 0) {
 		dev_err(codec->dev, "failed to read device ID register: %d\n",
@@ -283,13 +294,22 @@ static int tas5720_codec_probe(struct snd_soc_codec *codec)
 		goto probe_fail;
 	}
 
-	if (device_id != TAS5720_DEVICE_ID) {
-		dev_err(codec->dev, "wrong device ID. expected: %u read: %u\n",
-			TAS5720_DEVICE_ID, device_id);
-		ret = -ENODEV;
-		goto probe_fail;
+	switch (tas5720->devtype) {
+	case TAS5720:
+		expected_device_id = TAS5720_DEVICE_ID;
+		break;
+	case TAS5722:
+		expected_device_id = TAS5722_DEVICE_ID;
+		break;
+	default:
+		dev_err(codec->dev, "unexpected private driver data\n");
+		return -EINVAL;
 	}
 
+	if (device_id != expected_device_id)
+		dev_warn(codec->dev, "wrong device ID. expected: %u read: %u\n",
+			 expected_device_id, device_id);
+
 	/* Set device to mute */
 	ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL2_REG,
 				  TAS5720_MUTE, TAS5720_MUTE);
@@ -446,6 +466,15 @@ static const struct regmap_config tas5720_regmap_config = {
 	.volatile_reg = tas5720_is_volatile_reg,
 };
 
+static const struct regmap_config tas5722_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+
+	.max_register = TAS5722_MAX_REG,
+	.cache_type = REGCACHE_RBTREE,
+	.volatile_reg = tas5720_is_volatile_reg,
+};
+
 /*
  * DAC analog gain. There are four discrete values to select from, ranging
  * from 19.2 dB to 26.3dB.
@@ -544,6 +573,7 @@ static int tas5720_probe(struct i2c_client *client,
 {
 	struct device *dev = &client->dev;
 	struct tas5720_data *data;
+	const struct regmap_config *regmap_config;
 	int ret;
 	int i;
 
@@ -552,7 +582,20 @@ static int tas5720_probe(struct i2c_client *client,
 		return -ENOMEM;
 
 	data->tas5720_client = client;
-	data->regmap = devm_regmap_init_i2c(client, &tas5720_regmap_config);
+	data->devtype = id->driver_data;
+
+	switch (id->driver_data) {
+	case TAS5720:
+		regmap_config = &tas5720_regmap_config;
+		break;
+	case TAS5722:
+		regmap_config = &tas5722_regmap_config;
+		break;
+	default:
+		dev_err(dev, "unexpected private driver data\n");
+		return -EINVAL;
+	}
+	data->regmap = devm_regmap_init_i2c(client, regmap_config);
 	if (IS_ERR(data->regmap)) {
 		ret = PTR_ERR(data->regmap);
 		dev_err(dev, "failed to allocate register map: %d\n", ret);
@@ -592,7 +635,8 @@ static int tas5720_remove(struct i2c_client *client)
 }
 
 static const struct i2c_device_id tas5720_id[] = {
-	{ "tas5720", 0 },
+	{ "tas5720", TAS5720 },
+	{ "tas5722", TAS5722 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, tas5720_id);
@@ -600,6 +644,7 @@ MODULE_DEVICE_TABLE(i2c, tas5720_id);
 #if IS_ENABLED(CONFIG_OF)
 static const struct of_device_id tas5720_of_match[] = {
 	{ .compatible = "ti,tas5720", },
+	{ .compatible = "ti,tas5722", },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, tas5720_of_match);
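
TAS5720 and TAS5722 are told apart purely through the .driver_data field of the i2c_device_id table, which then selects both the regmap configuration (the TAS5722 has two extra registers) and the expected device ID. A condensed, hypothetical sketch of that variant-selection pattern, using made-up names and register limits:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

enum my_amp_type { MY_AMP_A, MY_AMP_B };

static const struct regmap_config my_amp_a_regmap_config = {
	.reg_bits = 8, .val_bits = 8, .max_register = 0x11,
};

static const struct regmap_config my_amp_b_regmap_config = {
	.reg_bits = 8, .val_bits = 8, .max_register = 0x14,
};

static const struct i2c_device_id my_amp_id[] = {
	{ "my-amp-a", MY_AMP_A },
	{ "my-amp-b", MY_AMP_B },
	{ }
};

static int my_amp_i2c_probe(struct i2c_client *client,
			    const struct i2c_device_id *id)
{
	const struct regmap_config *cfg;
	struct regmap *regmap;

	switch (id->driver_data) {
	case MY_AMP_A:
		cfg = &my_amp_a_regmap_config;
		break;
	case MY_AMP_B:
		cfg = &my_amp_b_regmap_config;
		break;
	default:
		return -EINVAL;
	}

	regmap = devm_regmap_init_i2c(client, cfg);
	return PTR_ERR_OR_ZERO(regmap);
}
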
diff --git a/sound/soc/codecs/tas5720.h b/sound/soc/codecs/tas5720.h
index 3d077c779b1260f5077541131f5bc03262810e81..1dda3095961d11592f93beddcc568a992d77322f 100644
--- a/sound/soc/codecs/tas5720.h
+++ b/sound/soc/codecs/tas5720.h
@@ -30,8 +30,14 @@
 #define TAS5720_DIGITAL_CLIP1_REG	0x11
 #define TAS5720_MAX_REG			TAS5720_DIGITAL_CLIP1_REG
 
+/* Additional TAS5722-specific Registers */
+#define TAS5722_DIGITAL_CTRL2_REG	0x13
+#define TAS5722_ANALOG_CTRL2_REG	0x14
+#define TAS5722_MAX_REG			TAS5722_ANALOG_CTRL2_REG
+
 /* TAS5720_DEVICE_ID_REG */
 #define TAS5720_DEVICE_ID		0x01
+#define TAS5722_DEVICE_ID		0x12
 
 /* TAS5720_POWER_CTRL_REG */
 #define TAS5720_DIG_CLIP_MASK		GENMASK(7, 2)
@@ -51,6 +57,7 @@
 #define TAS5720_SAIF_FORMAT_MASK	GENMASK(2, 0)
 
 /* TAS5720_DIGITAL_CTRL2_REG */
+#define TAS5722_VOL_RAMP_RATE		BIT(6)
 #define TAS5720_MUTE			BIT(4)
 #define TAS5720_TDM_SLOT_SEL_MASK	GENMASK(2, 0)
 
@@ -87,4 +94,28 @@
 #define TAS5720_CLIP1_MASK		GENMASK(7, 2)
 #define TAS5720_CLIP1_SHIFT		(0x2)
 
+/* TAS5722_DIGITAL_CTRL2_REG */
+#define TAS5722_HPF_3_7HZ		(0x0 << 5)
+#define TAS5722_HPF_7_4HZ		(0x1 << 5)
+#define TAS5722_HPF_14_9HZ		(0x2 << 5)
+#define TAS5722_HPF_29_7HZ		(0x3 << 5)
+#define TAS5722_HPF_59_4HZ		(0x4 << 5)
+#define TAS5722_HPF_118_4HZ		(0x5 << 5)
+#define TAS5722_HPF_235_0HZ		(0x6 << 5)
+#define TAS5722_HPF_463_2HZ		(0x7 << 5)
+#define TAS5722_HPF_MASK		GENMASK(7, 5)
+#define TAS5722_AUTO_SLEEP_OFF		(0x0 << 3)
+#define TAS5722_AUTO_SLEEP_1024LR	(0x1 << 3)
+#define TAS5722_AUTO_SLEEP_65536LR	(0x2 << 3)
+#define TAS5722_AUTO_SLEEP_262144LR	(0x3 << 3)
+#define TAS5722_AUTO_SLEEP_MASK		GENMASK(4, 3)
+#define TAS5722_TDM_SLOT_16B		BIT(2)
+#define TAS5722_MCLK_PIN_CFG		BIT(1)
+#define TAS5722_VOL_CONTROL_LSB		BIT(0)
+
+/* TAS5722_ANALOG_CTRL2_REG */
+#define TAS5722_FAULTZ_PU		BIT(3)
+#define TAS5722_VREG_LVL		BIT(2)
+#define TAS5722_PWR_TUNE		BIT(0)
+
 #endif /* __TAS5720_H__ */
diff --git a/sound/soc/codecs/tas6424.c b/sound/soc/codecs/tas6424.c
new file mode 100644
index 0000000000000000000000000000000000000000..49b87f6e85bf2329f3611c37485e62727c54a972
--- /dev/null
+++ b/sound/soc/codecs/tas6424.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ALSA SoC Texas Instruments TAS6424 Quad-Channel Audio Amplifier
+ *
+ * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+ *	Author: Andreas Dannenberg <dannenberg@ti.com>
+ *	Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+
+#include "tas6424.h"
+
+/* Define how often to check (and clear) the fault status register (in ms) */
+#define TAS6424_FAULT_CHECK_INTERVAL 200
+
+static const char * const tas6424_supply_names[] = {
+	"dvdd", /* Digital power supply. Connect to 3.3-V supply. */
+	"vbat", /* Supply used for higher voltage analog circuits. */
+	"pvdd", /* Class-D amp output FETs supply. */
+};
+#define TAS6424_NUM_SUPPLIES ARRAY_SIZE(tas6424_supply_names)
+
+struct tas6424_data {
+	struct device *dev;
+	struct regmap *regmap;
+	struct regulator_bulk_data supplies[TAS6424_NUM_SUPPLIES];
+	struct delayed_work fault_check_work;
+	unsigned int last_fault1;
+	unsigned int last_fault2;
+	unsigned int last_warn;
+};
+
+/*
+ * DAC digital volumes. From -103.5 to 24 dB in 0.5 dB steps. Note that
+ * setting the gain below -100 dB (register value <0x7) is effectively a MUTE
+ * as per device datasheet.
+ */
+static DECLARE_TLV_DB_SCALE(dac_tlv, -10350, 50, 0);
+
+static const struct snd_kcontrol_new tas6424_snd_controls[] = {
+	SOC_SINGLE_TLV("Speaker Driver CH1 Playback Volume",
+		       TAS6424_CH1_VOL_CTRL, 0, 0xff, 0, dac_tlv),
+	SOC_SINGLE_TLV("Speaker Driver CH2 Playback Volume",
+		       TAS6424_CH2_VOL_CTRL, 0, 0xff, 0, dac_tlv),
+	SOC_SINGLE_TLV("Speaker Driver CH3 Playback Volume",
+		       TAS6424_CH3_VOL_CTRL, 0, 0xff, 0, dac_tlv),
+	SOC_SINGLE_TLV("Speaker Driver CH4 Playback Volume",
+		       TAS6424_CH4_VOL_CTRL, 0, 0xff, 0, dac_tlv),
+};
+
+static int tas6424_dac_event(struct snd_soc_dapm_widget *w,
+			     struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tas6424_data *tas6424 = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s() event=0x%0x\n", __func__, event);
+
+	if (event & SND_SOC_DAPM_POST_PMU) {
+		/* Observe codec shutdown-to-active time */
+		msleep(12);
+
+		/* Turn on TAS6424 periodic fault checking/handling */
+		tas6424->last_fault1 = 0;
+		tas6424->last_fault2 = 0;
+		tas6424->last_warn = 0;
+		schedule_delayed_work(&tas6424->fault_check_work,
+				      msecs_to_jiffies(TAS6424_FAULT_CHECK_INTERVAL));
+	} else if (event & SND_SOC_DAPM_PRE_PMD) {
+		/* Disable TAS6424 periodic fault checking/handling */
+		cancel_delayed_work_sync(&tas6424->fault_check_work);
+	}
+
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget tas6424_dapm_widgets[] = {
+	SND_SOC_DAPM_AIF_IN("DAC IN", "Playback", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas6424_dac_event,
+			   SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_OUTPUT("OUT")
+};
+
+static const struct snd_soc_dapm_route tas6424_audio_map[] = {
+	{ "DAC", NULL, "DAC IN" },
+	{ "OUT", NULL, "DAC" },
+};
+
+static int tas6424_hw_params(struct snd_pcm_substream *substream,
+			     struct snd_pcm_hw_params *params,
+			     struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	unsigned int rate = params_rate(params);
+	unsigned int width = params_width(params);
+	u8 sap_ctrl = 0;
+
+	dev_dbg(codec->dev, "%s() rate=%u width=%u\n", __func__, rate, width);
+
+	switch (rate) {
+	case 44100:
+		sap_ctrl |= TAS6424_SAP_RATE_44100;
+		break;
+	case 48000:
+		sap_ctrl |= TAS6424_SAP_RATE_48000;
+		break;
+	case 96000:
+		sap_ctrl |= TAS6424_SAP_RATE_96000;
+		break;
+	default:
+		dev_err(codec->dev, "unsupported sample rate: %u\n", rate);
+		return -EINVAL;
+	}
+
+	switch (width) {
+	case 16:
+		sap_ctrl |= TAS6424_SAP_TDM_SLOT_SZ_16;
+		break;
+	case 24:
+		break;
+	default:
+		dev_err(codec->dev, "unsupported sample width: %u\n", width);
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, TAS6424_SAP_CTRL,
+			    TAS6424_SAP_RATE_MASK |
+			    TAS6424_SAP_TDM_SLOT_SZ_16,
+			    sap_ctrl);
+
+	return 0;
+}
+
+static int tas6424_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	u8 serial_format = 0;
+
+	dev_dbg(codec->dev, "%s() fmt=0x%0x\n", __func__, fmt);
+
+	/* clock masters */
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBS_CFS:
+		break;
+	default:
+		dev_err(codec->dev, "Invalid DAI master/slave interface\n");
+		return -EINVAL;
+	}
+
+	/* signal polarity */
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	default:
+		dev_err(codec->dev, "Invalid DAI clock signal polarity\n");
+		return -EINVAL;
+	}
+
+	/* interface format */
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		serial_format |= TAS6424_SAP_I2S;
+		break;
+	case SND_SOC_DAIFMT_DSP_A:
+		serial_format |= TAS6424_SAP_DSP;
+		break;
+	case SND_SOC_DAIFMT_DSP_B:
+		/*
+		 * We can use the fact that the TAS6424 does not care about the
+		 * LRCLK duty cycle during TDM to receive DSP_B formatted data
+		 * in LEFTJ mode (no delaying of the 1st data bit).
+		 */
+		serial_format |= TAS6424_SAP_LEFTJ;
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		serial_format |= TAS6424_SAP_LEFTJ;
+		break;
+	default:
+		dev_err(codec->dev, "Invalid DAI interface format\n");
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, TAS6424_SAP_CTRL,
+			    TAS6424_SAP_FMT_MASK, serial_format);
+
+	return 0;
+}
+
+static int tas6424_set_dai_tdm_slot(struct snd_soc_dai *dai,
+				    unsigned int tx_mask, unsigned int rx_mask,
+				    int slots, int slot_width)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	unsigned int first_slot, last_slot;
+	bool sap_tdm_slot_last;
+
+	dev_dbg(codec->dev, "%s() tx_mask=%d rx_mask=%d\n", __func__,
+		tx_mask, rx_mask);
+
+	if (!tx_mask || !rx_mask)
+		return 0; /* nothing needed to disable TDM mode */
+
+	/*
+	 * Determine the first slot and last slot that is being requested so
+	 * we'll be able to more easily enforce certain constraints as the
+	 * TAS6424's TDM interface is not fully configurable.
+	 */
+	first_slot = __ffs(tx_mask);
+	last_slot = __fls(rx_mask);
+
+	if (last_slot - first_slot != 3) {
+		dev_err(codec->dev, "tdm mask must cover 4 contiguous slots\n");
+		return -EINVAL;
+	}
+
+	switch (first_slot) {
+	case 0:
+		sap_tdm_slot_last = false;
+		break;
+	case 4:
+		sap_tdm_slot_last = true;
+		break;
+	default:
+		dev_err(codec->dev, "tdm mask must start at slot 0 or 4\n");
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, TAS6424_SAP_CTRL, TAS6424_SAP_TDM_SLOT_LAST,
+			    sap_tdm_slot_last ? TAS6424_SAP_TDM_SLOT_LAST : 0);
+
+	return 0;
+}
+
+static int tas6424_mute(struct snd_soc_dai *dai, int mute)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	unsigned int val;
+
+	dev_dbg(codec->dev, "%s() mute=%d\n", __func__, mute);
+
+	if (mute)
+		val = TAS6424_ALL_STATE_MUTE;
+	else
+		val = TAS6424_ALL_STATE_PLAY;
+
+	snd_soc_write(codec, TAS6424_CH_STATE_CTRL, val);
+
+	return 0;
+}
+
+static int tas6424_power_off(struct snd_soc_codec *codec)
+{
+	struct tas6424_data *tas6424 = snd_soc_codec_get_drvdata(codec);
+	int ret;
+
+	snd_soc_write(codec, TAS6424_CH_STATE_CTRL, TAS6424_ALL_STATE_HIZ);
+
+	regcache_cache_only(tas6424->regmap, true);
+	regcache_mark_dirty(tas6424->regmap);
+
+	ret = regulator_bulk_disable(ARRAY_SIZE(tas6424->supplies),
+				     tas6424->supplies);
+	if (ret < 0) {
+		dev_err(codec->dev, "failed to disable supplies: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int tas6424_power_on(struct snd_soc_codec *codec)
+{
+	struct tas6424_data *tas6424 = snd_soc_codec_get_drvdata(codec);
+	int ret;
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(tas6424->supplies),
+				    tas6424->supplies);
+	if (ret < 0) {
+		dev_err(codec->dev, "failed to enable supplies: %d\n", ret);
+		return ret;
+	}
+
+	regcache_cache_only(tas6424->regmap, false);
+
+	ret = regcache_sync(tas6424->regmap);
+	if (ret < 0) {
+		dev_err(codec->dev, "failed to sync regcache: %d\n", ret);
+		return ret;
+	}
+
+	snd_soc_write(codec, TAS6424_CH_STATE_CTRL, TAS6424_ALL_STATE_MUTE);
+
+	/* Any time we come out of Hi-Z, the output channels automatically run
+	 * DC load diagnostics; wait here until this completes.
+	 */
+	msleep(230);
+
+	return 0;
+}
+
+static int tas6424_set_bias_level(struct snd_soc_codec *codec,
+				  enum snd_soc_bias_level level)
+{
+	dev_dbg(codec->dev, "%s() level=%d\n", __func__, level);
+
+	switch (level) {
+	case SND_SOC_BIAS_ON:
+	case SND_SOC_BIAS_PREPARE:
+		break;
+	case SND_SOC_BIAS_STANDBY:
+		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF)
+			tas6424_power_on(codec);
+		break;
+	case SND_SOC_BIAS_OFF:
+		tas6424_power_off(codec);
+		break;
+	}
+
+	return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_tas6424 = {
+	.set_bias_level = tas6424_set_bias_level,
+	.idle_bias_off = true,
+
+	.component_driver = {
+		.controls = tas6424_snd_controls,
+		.num_controls = ARRAY_SIZE(tas6424_snd_controls),
+		.dapm_widgets = tas6424_dapm_widgets,
+		.num_dapm_widgets = ARRAY_SIZE(tas6424_dapm_widgets),
+		.dapm_routes = tas6424_audio_map,
+		.num_dapm_routes = ARRAY_SIZE(tas6424_audio_map),
+	},
+};
+
+static struct snd_soc_dai_ops tas6424_speaker_dai_ops = {
+	.hw_params	= tas6424_hw_params,
+	.set_fmt	= tas6424_set_dai_fmt,
+	.set_tdm_slot	= tas6424_set_dai_tdm_slot,
+	.digital_mute	= tas6424_mute,
+};
+
+static struct snd_soc_dai_driver tas6424_dai[] = {
+	{
+		.name = "tas6424-amplifier",
+		.playback = {
+			.stream_name = "Playback",
+			.channels_min = 1,
+			.channels_max = 4,
+			.rates = TAS6424_RATES,
+			.formats = TAS6424_FORMATS,
+		},
+		.ops = &tas6424_speaker_dai_ops,
+	},
+};
+
+static void tas6424_fault_check_work(struct work_struct *work)
+{
+	struct tas6424_data *tas6424 = container_of(work, struct tas6424_data,
+						    fault_check_work.work);
+	struct device *dev = tas6424->dev;
+	unsigned int reg;
+	int ret;
+
+	ret = regmap_read(tas6424->regmap, TAS6424_GLOB_FAULT1, &reg);
+	if (ret < 0) {
+		dev_err(dev, "failed to read FAULT1 register: %d\n", ret);
+		goto out;
+	}
+
+	/*
+	 * Ignore any clock faults as there is no clean way to check for them.
+	 * We would need to start checking for those faults *after* the SAIF
+	 * stream has been set up, and stop checking *before* the stream is
+	 * stopped to avoid any false positives. However, there are no
+	 * appropriate hooks to monitor these events.
+	 */
+	reg &= TAS6424_FAULT_PVDD_OV |
+	       TAS6424_FAULT_VBAT_OV |
+	       TAS6424_FAULT_PVDD_UV |
+	       TAS6424_FAULT_VBAT_UV;
+
+	if (reg)
+		goto check_global_fault2_reg;
+
+	/*
+	 * Only flag errors once for a given occurrence. This is needed as
+	 * the TAS6424 will take time clearing the fault condition internally
+	 * during which we don't want to bombard the system with the same
+	 * error message over and over.
+	 */
+	if ((reg & TAS6424_FAULT_PVDD_OV) && !(tas6424->last_fault1 & TAS6424_FAULT_PVDD_OV))
+		dev_crit(dev, "experienced a PVDD overvoltage fault\n");
+
+	if ((reg & TAS6424_FAULT_VBAT_OV) && !(tas6424->last_fault1 & TAS6424_FAULT_VBAT_OV))
+		dev_crit(dev, "experienced a VBAT overvoltage fault\n");
+
+	if ((reg & TAS6424_FAULT_PVDD_UV) && !(tas6424->last_fault1 & TAS6424_FAULT_PVDD_UV))
+		dev_crit(dev, "experienced a PVDD undervoltage fault\n");
+
+	if ((reg & TAS6424_FAULT_VBAT_UV) && !(tas6424->last_fault1 & TAS6424_FAULT_VBAT_UV))
+		dev_crit(dev, "experienced a VBAT undervoltage fault\n");
+
+	/* Store current fault1 value so we can detect any changes next time */
+	tas6424->last_fault1 = reg;
+
+check_global_fault2_reg:
+	ret = regmap_read(tas6424->regmap, TAS6424_GLOB_FAULT2, &reg);
+	if (ret < 0) {
+		dev_err(dev, "failed to read FAULT2 register: %d\n", ret);
+		goto out;
+	}
+
+	reg &= TAS6424_FAULT_OTSD |
+	       TAS6424_FAULT_OTSD_CH1 |
+	       TAS6424_FAULT_OTSD_CH2 |
+	       TAS6424_FAULT_OTSD_CH3 |
+	       TAS6424_FAULT_OTSD_CH4;
+
+	if (!reg)
+		goto check_warn_reg;
+
+	if ((reg & TAS6424_FAULT_OTSD) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD))
+		dev_crit(dev, "experienced a global overtemp shutdown\n");
+
+	if ((reg & TAS6424_FAULT_OTSD_CH1) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD_CH1))
+		dev_crit(dev, "experienced an overtemp shutdown on CH1\n");
+
+	if ((reg & TAS6424_FAULT_OTSD_CH2) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD_CH2))
+		dev_crit(dev, "experienced an overtemp shutdown on CH2\n");
+
+	if ((reg & TAS6424_FAULT_OTSD_CH3) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD_CH3))
+		dev_crit(dev, "experienced an overtemp shutdown on CH3\n");
+
+	if ((reg & TAS6424_FAULT_OTSD_CH4) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD_CH4))
+		dev_crit(dev, "experienced an overtemp shutdown on CH4\n");
+
+	/* Store current fault2 value so we can detect any changes next time */
+	tas6424->last_fault2 = reg;
+
+check_warn_reg:
+	ret = regmap_read(tas6424->regmap, TAS6424_WARN, &reg);
+	if (ret < 0) {
+		dev_err(dev, "failed to read WARN register: %d\n", ret);
+		goto out;
+	}
+
+	reg &= TAS6424_WARN_VDD_UV |
+	       TAS6424_WARN_VDD_POR |
+	       TAS6424_WARN_VDD_OTW |
+	       TAS6424_WARN_VDD_OTW_CH1 |
+	       TAS6424_WARN_VDD_OTW_CH2 |
+	       TAS6424_WARN_VDD_OTW_CH3 |
+	       TAS6424_WARN_VDD_OTW_CH4;
+
+	if (!reg)
+		goto out;
+
+	if ((reg & TAS6424_WARN_VDD_UV) && !(tas6424->last_warn & TAS6424_WARN_VDD_UV))
+		dev_warn(dev, "experienced a VDD undervoltage condition\n");
+
+	if ((reg & TAS6424_WARN_VDD_POR) && !(tas6424->last_warn & TAS6424_WARN_VDD_POR))
+		dev_warn(dev, "experienced a VDD POR condition\n");
+
+	if ((reg & TAS6424_WARN_VDD_OTW) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW))
+		dev_warn(dev, "experienced a global overtemp warning\n");
+
+	if ((reg & TAS6424_WARN_VDD_OTW_CH1) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW_CH1))
+		dev_warn(dev, "experienced an overtemp warning on CH1\n");
+
+	if ((reg & TAS6424_WARN_VDD_OTW_CH2) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW_CH2))
+		dev_warn(dev, "experienced an overtemp warning on CH2\n");
+
+	if ((reg & TAS6424_WARN_VDD_OTW_CH3) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW_CH3))
+		dev_warn(dev, "experienced an overtemp warning on CH3\n");
+
+	if ((reg & TAS6424_WARN_VDD_OTW_CH4) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW_CH4))
+		dev_warn(dev, "experienced an overtemp warning on CH4\n");
+
+	/* Store current warn value so we can detect any changes next time */
+	tas6424->last_warn = reg;
+
+	/* Clear any faults by toggling the CLEAR_FAULT control bit */
+	ret = regmap_write_bits(tas6424->regmap, TAS6424_MISC_CTRL3,
+				TAS6424_CLEAR_FAULT, TAS6424_CLEAR_FAULT);
+	if (ret < 0)
+		dev_err(dev, "failed to write MISC_CTRL3 register: %d\n", ret);
+
+	ret = regmap_write_bits(tas6424->regmap, TAS6424_MISC_CTRL3,
+				TAS6424_CLEAR_FAULT, 0);
+	if (ret < 0)
+		dev_err(dev, "failed to write MISC_CTRL3 register: %d\n", ret);
+
+out:
+	/* Schedule the next fault check at the specified interval */
+	schedule_delayed_work(&tas6424->fault_check_work,
+			      msecs_to_jiffies(TAS6424_FAULT_CHECK_INTERVAL));
+}
+
+static const struct reg_default tas6424_reg_defaults[] = {
+	{ TAS6424_MODE_CTRL,		0x00 },
+	{ TAS6424_MISC_CTRL1,		0x32 },
+	{ TAS6424_MISC_CTRL2,		0x62 },
+	{ TAS6424_SAP_CTRL,		0x04 },
+	{ TAS6424_CH_STATE_CTRL,	0x55 },
+	{ TAS6424_CH1_VOL_CTRL,		0xcf },
+	{ TAS6424_CH2_VOL_CTRL,		0xcf },
+	{ TAS6424_CH3_VOL_CTRL,		0xcf },
+	{ TAS6424_CH4_VOL_CTRL,		0xcf },
+	{ TAS6424_DC_DIAG_CTRL1,	0x00 },
+	{ TAS6424_DC_DIAG_CTRL2,	0x11 },
+	{ TAS6424_DC_DIAG_CTRL3,	0x11 },
+	{ TAS6424_PIN_CTRL,		0xff },
+	{ TAS6424_AC_DIAG_CTRL1,	0x00 },
+	{ TAS6424_MISC_CTRL3,		0x00 },
+	{ TAS6424_CLIP_CTRL,		0x01 },
+	{ TAS6424_CLIP_WINDOW,		0x14 },
+	{ TAS6424_CLIP_WARN,		0x00 },
+	{ TAS6424_CBC_STAT,		0x00 },
+	{ TAS6424_MISC_CTRL4,		0x40 },
+};
+
+static bool tas6424_is_writable_reg(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case TAS6424_MODE_CTRL:
+	case TAS6424_MISC_CTRL1:
+	case TAS6424_MISC_CTRL2:
+	case TAS6424_SAP_CTRL:
+	case TAS6424_CH_STATE_CTRL:
+	case TAS6424_CH1_VOL_CTRL:
+	case TAS6424_CH2_VOL_CTRL:
+	case TAS6424_CH3_VOL_CTRL:
+	case TAS6424_CH4_VOL_CTRL:
+	case TAS6424_DC_DIAG_CTRL1:
+	case TAS6424_DC_DIAG_CTRL2:
+	case TAS6424_DC_DIAG_CTRL3:
+	case TAS6424_PIN_CTRL:
+	case TAS6424_AC_DIAG_CTRL1:
+	case TAS6424_MISC_CTRL3:
+	case TAS6424_CLIP_CTRL:
+	case TAS6424_CLIP_WINDOW:
+	case TAS6424_CLIP_WARN:
+	case TAS6424_CBC_STAT:
+	case TAS6424_MISC_CTRL4:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool tas6424_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case TAS6424_DC_LOAD_DIAG_REP12:
+	case TAS6424_DC_LOAD_DIAG_REP34:
+	case TAS6424_DC_LOAD_DIAG_REPLO:
+	case TAS6424_CHANNEL_STATE:
+	case TAS6424_CHANNEL_FAULT:
+	case TAS6424_GLOB_FAULT1:
+	case TAS6424_GLOB_FAULT2:
+	case TAS6424_WARN:
+	case TAS6424_AC_LOAD_DIAG_REP1:
+	case TAS6424_AC_LOAD_DIAG_REP2:
+	case TAS6424_AC_LOAD_DIAG_REP3:
+	case TAS6424_AC_LOAD_DIAG_REP4:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static const struct regmap_config tas6424_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+
+	.writeable_reg = tas6424_is_writable_reg,
+	.volatile_reg = tas6424_is_volatile_reg,
+
+	.max_register = TAS6424_MAX,
+	.reg_defaults = tas6424_reg_defaults,
+	.num_reg_defaults = ARRAY_SIZE(tas6424_reg_defaults),
+	.cache_type = REGCACHE_RBTREE,
+};
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id tas6424_of_ids[] = {
+	{ .compatible = "ti,tas6424", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tas6424_of_ids);
+#endif
+
+static int tas6424_i2c_probe(struct i2c_client *client,
+			     const struct i2c_device_id *id)
+{
+	struct device *dev = &client->dev;
+	struct tas6424_data *tas6424;
+	int ret;
+	int i;
+
+	tas6424 = devm_kzalloc(dev, sizeof(*tas6424), GFP_KERNEL);
+	if (!tas6424)
+		return -ENOMEM;
+	dev_set_drvdata(dev, tas6424);
+
+	tas6424->dev = dev;
+
+	tas6424->regmap = devm_regmap_init_i2c(client, &tas6424_regmap_config);
+	if (IS_ERR(tas6424->regmap)) {
+		ret = PTR_ERR(tas6424->regmap);
+		dev_err(dev, "unable to allocate register map: %d\n", ret);
+		return ret;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(tas6424->supplies); i++)
+		tas6424->supplies[i].supply = tas6424_supply_names[i];
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(tas6424->supplies),
+				      tas6424->supplies);
+	if (ret) {
+		dev_err(dev, "unable to request supplies: %d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(tas6424->supplies),
+				    tas6424->supplies);
+	if (ret) {
+		dev_err(dev, "unable to enable supplies: %d\n", ret);
+		return ret;
+	}
+
+	/* Reset device to establish well-defined startup state */
+	ret = regmap_update_bits(tas6424->regmap, TAS6424_MODE_CTRL,
+				 TAS6424_RESET, TAS6424_RESET);
+	if (ret) {
+		dev_err(dev, "unable to reset device: %d\n", ret);
+		return ret;
+	}
+
+	INIT_DELAYED_WORK(&tas6424->fault_check_work, tas6424_fault_check_work);
+
+	ret = snd_soc_register_codec(dev, &soc_codec_dev_tas6424,
+				     tas6424_dai, ARRAY_SIZE(tas6424_dai));
+	if (ret < 0) {
+		dev_err(dev, "unable to register codec: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int tas6424_i2c_remove(struct i2c_client *client)
+{
+	struct device *dev = &client->dev;
+	struct tas6424_data *tas6424 = dev_get_drvdata(dev);
+	int ret;
+
+	snd_soc_unregister_codec(dev);
+
+	cancel_delayed_work_sync(&tas6424->fault_check_work);
+
+	ret = regulator_bulk_disable(ARRAY_SIZE(tas6424->supplies),
+				     tas6424->supplies);
+	if (ret < 0) {
+		dev_err(dev, "unable to disable supplies: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct i2c_device_id tas6424_i2c_ids[] = {
+	{ "tas6424", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, tas6424_i2c_ids);
+
+static struct i2c_driver tas6424_i2c_driver = {
+	.driver = {
+		.name = "tas6424",
+		.of_match_table = of_match_ptr(tas6424_of_ids),
+	},
+	.probe = tas6424_i2c_probe,
+	.remove = tas6424_i2c_remove,
+	.id_table = tas6424_i2c_ids,
+};
+module_i2c_driver(tas6424_i2c_driver);
+
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TAS6424 Audio amplifier driver");
+MODULE_LICENSE("GPL v2");
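
The fault handling above is a self-rescheduling delayed work item: it is armed from the DAC's POST_PMU event, cancelled from PRE_PMD, and only reports bits that were clear in the previously cached value so a persistent fault does not flood the log. Reduced to its essentials (one fault bit shown, names reused from the driver above), the pattern is sketched below for illustration:

static void my_fault_check_work(struct work_struct *work)
{
	struct tas6424_data *priv = container_of(work, struct tas6424_data,
						 fault_check_work.work);
	unsigned int reg, new_faults;

	if (!regmap_read(priv->regmap, TAS6424_GLOB_FAULT1, &reg)) {
		/* report only bits that were clear on the previous poll */
		new_faults = reg & ~priv->last_fault1;
		if (new_faults & TAS6424_FAULT_PVDD_OV)
			dev_crit(priv->dev, "PVDD overvoltage fault\n");
		priv->last_fault1 = reg;
	}

	/* re-arm the poller; it is cancelled from the DAPM PRE_PMD event */
	schedule_delayed_work(&priv->fault_check_work,
			      msecs_to_jiffies(TAS6424_FAULT_CHECK_INTERVAL));
}
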
diff --git a/sound/soc/codecs/tas6424.h b/sound/soc/codecs/tas6424.h
new file mode 100644
index 0000000000000000000000000000000000000000..430588328a06a5aeef878f5b9f69e60a34351839
--- /dev/null
+++ b/sound/soc/codecs/tas6424.h
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ALSA SoC Texas Instruments TAS6424 Quad-Channel Audio Amplifier
+ *
+ * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+ *	Author: Andreas Dannenberg <dannenberg@ti.com>
+ *	Andrew F. Davis <afd@ti.com>
+ */
+
+#ifndef __TAS6424_H__
+#define __TAS6424_H__
+
+#define TAS6424_RATES (SNDRV_PCM_RATE_44100 | \
+		       SNDRV_PCM_RATE_48000 | \
+		       SNDRV_PCM_RATE_96000)
+
+#define TAS6424_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+			 SNDRV_PCM_FMTBIT_S24_LE)
+
+/* Register Address Map */
+#define TAS6424_MODE_CTRL		0x00
+#define TAS6424_MISC_CTRL1		0x01
+#define TAS6424_MISC_CTRL2		0x02
+#define TAS6424_SAP_CTRL		0x03
+#define TAS6424_CH_STATE_CTRL		0x04
+#define TAS6424_CH1_VOL_CTRL		0x05
+#define TAS6424_CH2_VOL_CTRL		0x06
+#define TAS6424_CH3_VOL_CTRL		0x07
+#define TAS6424_CH4_VOL_CTRL		0x08
+#define TAS6424_DC_DIAG_CTRL1		0x09
+#define TAS6424_DC_DIAG_CTRL2		0x0a
+#define TAS6424_DC_DIAG_CTRL3		0x0b
+#define TAS6424_DC_LOAD_DIAG_REP12	0x0c
+#define TAS6424_DC_LOAD_DIAG_REP34	0x0d
+#define TAS6424_DC_LOAD_DIAG_REPLO	0x0e
+#define TAS6424_CHANNEL_STATE		0x0f
+#define TAS6424_CHANNEL_FAULT		0x10
+#define TAS6424_GLOB_FAULT1		0x11
+#define TAS6424_GLOB_FAULT2		0x12
+#define TAS6424_WARN			0x13
+#define TAS6424_PIN_CTRL		0x14
+#define TAS6424_AC_DIAG_CTRL1		0x15
+#define TAS6424_AC_DIAG_CTRL2		0x16
+#define TAS6424_AC_LOAD_DIAG_REP1	0x17
+#define TAS6424_AC_LOAD_DIAG_REP2	0x18
+#define TAS6424_AC_LOAD_DIAG_REP3	0x19
+#define TAS6424_AC_LOAD_DIAG_REP4	0x1a
+#define TAS6424_MISC_CTRL3		0x21
+#define TAS6424_CLIP_CTRL		0x22
+#define TAS6424_CLIP_WINDOW		0x23
+#define TAS6424_CLIP_WARN		0x24
+#define TAS6424_CBC_STAT		0x25
+#define TAS6424_MISC_CTRL4		0x26
+#define TAS6424_MAX			TAS6424_MISC_CTRL4
+
+/* TAS6424_MODE_CTRL_REG */
+#define TAS6424_RESET			BIT(7)
+
+/* TAS6424_SAP_CTRL_REG */
+#define TAS6424_SAP_RATE_MASK		GENMASK(7, 6)
+#define TAS6424_SAP_RATE_44100		(0x00 << 6)
+#define TAS6424_SAP_RATE_48000		(0x01 << 6)
+#define TAS6424_SAP_RATE_96000		(0x02 << 6)
+#define TAS6424_SAP_TDM_SLOT_LAST	BIT(5)
+#define TAS6424_SAP_TDM_SLOT_SZ_16	BIT(4)
+#define TAS6424_SAP_TDM_SLOT_SWAP	BIT(3)
+#define TAS6424_SAP_FMT_MASK		GENMASK(2, 0)
+#define TAS6424_SAP_RIGHTJ_24		(0x00 << 0)
+#define TAS6424_SAP_RIGHTJ_20		(0x01 << 0)
+#define TAS6424_SAP_RIGHTJ_18		(0x02 << 0)
+#define TAS6424_SAP_RIGHTJ_16		(0x03 << 0)
+#define TAS6424_SAP_I2S			(0x04 << 0)
+#define TAS6424_SAP_LEFTJ		(0x05 << 0)
+#define TAS6424_SAP_DSP			(0x06 << 0)
+
+/* TAS6424_CH_STATE_CTRL_REG */
+#define TAS6424_CH1_STATE_MASK		GENMASK(7, 6)
+#define TAS6424_CH1_STATE_PLAY		(0x00 << 6)
+#define TAS6424_CH1_STATE_HIZ		(0x01 << 6)
+#define TAS6424_CH1_STATE_MUTE		(0x02 << 6)
+#define TAS6424_CH1_STATE_DIAG		(0x03 << 6)
+#define TAS6424_CH2_STATE_MASK		GENMASK(5, 4)
+#define TAS6424_CH2_STATE_PLAY		(0x00 << 4)
+#define TAS6424_CH2_STATE_HIZ		(0x01 << 4)
+#define TAS6424_CH2_STATE_MUTE		(0x02 << 4)
+#define TAS6424_CH2_STATE_DIAG		(0x03 << 4)
+#define TAS6424_CH3_STATE_MASK		GENMASK(3, 2)
+#define TAS6424_CH3_STATE_PLAY		(0x00 << 2)
+#define TAS6424_CH3_STATE_HIZ		(0x01 << 2)
+#define TAS6424_CH3_STATE_MUTE		(0x02 << 2)
+#define TAS6424_CH3_STATE_DIAG		(0x03 << 2)
+#define TAS6424_CH4_STATE_MASK		GENMASK(1, 0)
+#define TAS6424_CH4_STATE_PLAY		(0x00 << 0)
+#define TAS6424_CH4_STATE_HIZ		(0x01 << 0)
+#define TAS6424_CH4_STATE_MUTE		(0x02 << 0)
+#define TAS6424_CH4_STATE_DIAG		(0x03 << 0)
+#define TAS6424_ALL_STATE_PLAY		(TAS6424_CH1_STATE_PLAY | \
+					 TAS6424_CH2_STATE_PLAY | \
+					 TAS6424_CH3_STATE_PLAY | \
+					 TAS6424_CH4_STATE_PLAY)
+#define TAS6424_ALL_STATE_HIZ		(TAS6424_CH1_STATE_HIZ | \
+					 TAS6424_CH2_STATE_HIZ | \
+					 TAS6424_CH3_STATE_HIZ | \
+					 TAS6424_CH4_STATE_HIZ)
+#define TAS6424_ALL_STATE_MUTE		(TAS6424_CH1_STATE_MUTE | \
+					 TAS6424_CH2_STATE_MUTE | \
+					 TAS6424_CH3_STATE_MUTE | \
+					 TAS6424_CH4_STATE_MUTE)
+#define TAS6424_ALL_STATE_DIAG		(TAS6424_CH1_STATE_DIAG | \
+					 TAS6424_CH2_STATE_DIAG | \
+					 TAS6424_CH3_STATE_DIAG | \
+					 TAS6424_CH4_STATE_DIAG)
+
+/* TAS6424_GLOB_FAULT1_REG */
+#define TAS6424_FAULT_CLOCK		BIT(4)
+#define TAS6424_FAULT_PVDD_OV		BIT(3)
+#define TAS6424_FAULT_VBAT_OV		BIT(2)
+#define TAS6424_FAULT_PVDD_UV		BIT(1)
+#define TAS6424_FAULT_VBAT_UV		BIT(0)
+
+/* TAS6424_GLOB_FAULT2_REG */
+#define TAS6424_FAULT_OTSD		BIT(4)
+#define TAS6424_FAULT_OTSD_CH1		BIT(3)
+#define TAS6424_FAULT_OTSD_CH2		BIT(2)
+#define TAS6424_FAULT_OTSD_CH3		BIT(1)
+#define TAS6424_FAULT_OTSD_CH4		BIT(0)
+
+/* TAS6424_WARN_REG */
+#define TAS6424_WARN_VDD_UV		BIT(6)
+#define TAS6424_WARN_VDD_POR		BIT(5)
+#define TAS6424_WARN_VDD_OTW		BIT(4)
+#define TAS6424_WARN_VDD_OTW_CH1	BIT(3)
+#define TAS6424_WARN_VDD_OTW_CH2	BIT(2)
+#define TAS6424_WARN_VDD_OTW_CH3	BIT(1)
+#define TAS6424_WARN_VDD_OTW_CH4	BIT(0)
+
+/* TAS6424_MISC_CTRL3_REG */
+#define TAS6424_CLEAR_FAULT		BIT(7)
+#define TAS6424_PBTL_CH_SEL		BIT(6)
+#define TAS6424_MASK_CBC_WARN		BIT(5)
+#define TAS6424_MASK_VDD_UV		BIT(4)
+#define TAS6424_OTSD_AUTO_RECOVERY	BIT(3)
+
+#endif /* __TAS6424_H__ */
diff --git a/sound/soc/codecs/tfa9879.c b/sound/soc/codecs/tfa9879.c
index f8dd67ca0744c98ff71939cd3944a82f2e3db1c8..e7ca764b57295fef26ecf6d3d113b594ef578cb9 100644
--- a/sound/soc/codecs/tfa9879.c
+++ b/sound/soc/codecs/tfa9879.c
@@ -316,6 +316,7 @@ static const struct of_device_id tfa9879_of_match[] = {
 	{ .compatible = "nxp,tfa9879", },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, tfa9879_of_match);
 
 static struct i2c_driver tfa9879_i2c_driver = {
 	.driver = {
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index e2862372c26e1f6807c7f5d7035c1b9d2722ae45..858cb8be445f7c4b06a438ac0d8b603f80879d45 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -1,22 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * ALSA SoC TLV320AIC31XX codec driver
+ * ALSA SoC TLV320AIC31xx CODEC Driver
  *
- * Copyright (C) 2014 Texas Instruments, Inc.
- *
- * Author: Jyri Sarha <jsarha@ti.com>
+ * Copyright (C) 2014-2017 Texas Instruments Incorporated - http://www.ti.com/
+ *	Jyri Sarha <jsarha@ti.com>
  *
  * Based on ground work by: Ajit Kulkarni <x0175765@ti.com>
  *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED AS IS AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- *
- * The TLV320AIC31xx series of audio codec is a low-power, highly integrated
- * high performance codec which provides a stereo DAC, a mono ADC,
+ * The TLV320AIC31xx series of audio codecs are low-power, highly integrated
+ * high-performance codecs which provide a stereo DAC, a mono ADC,
  * and mono/stereo Class-D speaker driver.
  */
 
@@ -26,7 +18,7 @@
 #include <linux/delay.h>
 #include <linux/pm.h>
 #include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/regulator/consumer.h>
 #include <linux/acpi.h>
 #include <linux/of.h>
@@ -144,8 +136,7 @@ static const struct regmap_config aic31xx_i2c_regmap = {
 	.max_register = 12 * 128,
 };
 
-#define AIC31XX_NUM_SUPPLIES	6
-static const char * const aic31xx_supply_names[AIC31XX_NUM_SUPPLIES] = {
+static const char * const aic31xx_supply_names[] = {
 	"HPVDD",
 	"SPRVDD",
 	"SPLVDD",
@@ -154,6 +145,8 @@ static const char * const aic31xx_supply_names[AIC31XX_NUM_SUPPLIES] = {
 	"DVDD",
 };
 
+#define AIC31XX_NUM_SUPPLIES ARRAY_SIZE(aic31xx_supply_names)
+
 struct aic31xx_disable_nb {
 	struct notifier_block nb;
 	struct aic31xx_priv *aic31xx;
@@ -164,6 +157,9 @@ struct aic31xx_priv {
 	u8 i2c_regs_status;
 	struct device *dev;
 	struct regmap *regmap;
+	enum aic31xx_type codec_type;
+	struct gpio_desc *gpio_reset;
+	int micbias_vg;
 	struct aic31xx_pdata pdata;
 	struct regulator_bulk_data supplies[AIC31XX_NUM_SUPPLIES];
 	struct aic31xx_disable_nb disable_nb[AIC31XX_NUM_SUPPLIES];
@@ -185,7 +181,7 @@ struct aic31xx_rate_divs {
 	u8 madc;
 };
 
-/* ADC dividers can be disabled by cofiguring them to 0 */
+/* ADC dividers can be disabled by configuring them to 0 */
 static const struct aic31xx_rate_divs aic31xx_divs[] = {
 	/* mclk/p    rate  pll: j     d        dosr ndac mdac  aors nadc madc */
 	/* 8k rate */
@@ -456,7 +452,7 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
 		/* change mic bias voltage to user defined */
 		snd_soc_update_bits(codec, AIC31XX_MICBIAS,
 				    AIC31XX_MICBIAS_MASK,
-				    aic31xx->pdata.micbias_vg <<
+				    aic31xx->micbias_vg <<
 				    AIC31XX_MICBIAS_SHIFT);
 		dev_dbg(codec->dev, "%s: turned on\n", __func__);
 		break;
@@ -679,14 +675,14 @@ static int aic31xx_add_controls(struct snd_soc_codec *codec)
 	int ret = 0;
 	struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
 
-	if (!(aic31xx->pdata.codec_type & DAC31XX_BIT))
+	if (!(aic31xx->codec_type & DAC31XX_BIT))
 		ret = snd_soc_add_codec_controls(
 			codec, aic31xx_snd_controls,
 			ARRAY_SIZE(aic31xx_snd_controls));
 	if (ret)
 		return ret;
 
-	if (aic31xx->pdata.codec_type & AIC31XX_STEREO_CLASS_D_BIT)
+	if (aic31xx->codec_type & AIC31XX_STEREO_CLASS_D_BIT)
 		ret = snd_soc_add_codec_controls(
 			codec, aic311x_snd_controls,
 			ARRAY_SIZE(aic311x_snd_controls));
@@ -704,7 +700,7 @@ static int aic31xx_add_widgets(struct snd_soc_codec *codec)
 	struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
 	int ret = 0;
 
-	if (aic31xx->pdata.codec_type & DAC31XX_BIT) {
+	if (aic31xx->codec_type & DAC31XX_BIT) {
 		ret = snd_soc_dapm_new_controls(
 			dapm, dac31xx_dapm_widgets,
 			ARRAY_SIZE(dac31xx_dapm_widgets));
@@ -728,7 +724,7 @@ static int aic31xx_add_widgets(struct snd_soc_codec *codec)
 			return ret;
 	}
 
-	if (aic31xx->pdata.codec_type & AIC31XX_STEREO_CLASS_D_BIT) {
+	if (aic31xx->codec_type & AIC31XX_STEREO_CLASS_D_BIT) {
 		ret = snd_soc_dapm_new_controls(
 			dapm, aic311x_dapm_widgets,
 			ARRAY_SIZE(aic311x_dapm_widgets));
@@ -760,11 +756,17 @@ static int aic31xx_setup_pll(struct snd_soc_codec *codec,
 {
 	struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
 	int bclk_score = snd_soc_params_to_frame_size(params);
-	int mclk_p = aic31xx->sysclk / aic31xx->p_div;
+	int mclk_p;
 	int bclk_n = 0;
 	int match = -1;
 	int i;
 
+	if (!aic31xx->sysclk || !aic31xx->p_div) {
+		dev_err(codec->dev, "Master clock not supplied\n");
+		return -EINVAL;
+	}
+	mclk_p = aic31xx->sysclk / aic31xx->p_div;
+
 	/* Use PLL as CODEC_CLKIN and DAC_CLK as BDIV_CLKIN */
 	snd_soc_update_bits(codec, AIC31XX_CLKMUX,
 			    AIC31XX_CODEC_CLKIN_MASK, AIC31XX_CODEC_CLKIN_PLL);
@@ -840,11 +842,17 @@ static int aic31xx_setup_pll(struct snd_soc_codec *codec,
 
 	dev_dbg(codec->dev,
 		"pll %d.%04d/%d dosr %d n %d m %d aosr %d n %d m %d bclk_n %d\n",
-		aic31xx_divs[i].pll_j, aic31xx_divs[i].pll_d,
-		aic31xx->p_div, aic31xx_divs[i].dosr,
-		aic31xx_divs[i].ndac, aic31xx_divs[i].mdac,
-		aic31xx_divs[i].aosr, aic31xx_divs[i].nadc,
-		aic31xx_divs[i].madc, bclk_n);
+		aic31xx_divs[i].pll_j,
+		aic31xx_divs[i].pll_d,
+		aic31xx->p_div,
+		aic31xx_divs[i].dosr,
+		aic31xx_divs[i].ndac,
+		aic31xx_divs[i].mdac,
+		aic31xx_divs[i].aosr,
+		aic31xx_divs[i].nadc,
+		aic31xx_divs[i].madc,
+		bclk_n
+	);
 
 	return 0;
 }
@@ -919,8 +927,28 @@ static int aic31xx_set_dai_fmt(struct snd_soc_dai *codec_dai,
 	case SND_SOC_DAIFMT_CBM_CFM:
 		iface_reg1 |= AIC31XX_BCLK_MASTER | AIC31XX_WCLK_MASTER;
 		break;
+	case SND_SOC_DAIFMT_CBS_CFM:
+		iface_reg1 |= AIC31XX_WCLK_MASTER;
+		break;
+	case SND_SOC_DAIFMT_CBM_CFS:
+		iface_reg1 |= AIC31XX_BCLK_MASTER;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		break;
+	default:
+		dev_err(codec->dev, "Invalid DAI master/slave interface\n");
+		return -EINVAL;
+	}
+
+	/* signal polarity */
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	case SND_SOC_DAIFMT_IB_NF:
+		iface_reg2 |= AIC31XX_BCLKINV_MASK;
+		break;
 	default:
-		dev_alert(codec->dev, "Invalid DAI master/slave interface\n");
+		dev_err(codec->dev, "Invalid DAI clock signal polarity\n");
 		return -EINVAL;
 	}
 
@@ -931,16 +959,12 @@ static int aic31xx_set_dai_fmt(struct snd_soc_dai *codec_dai,
 	case SND_SOC_DAIFMT_DSP_A:
 		dsp_a_val = 0x1; /* fall through */
 	case SND_SOC_DAIFMT_DSP_B:
-		/* NOTE: BCLKINV bit value 1 equas NB and 0 equals IB */
-		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
-		case SND_SOC_DAIFMT_NB_NF:
-			iface_reg2 |= AIC31XX_BCLKINV_MASK;
-			break;
-		case SND_SOC_DAIFMT_IB_NF:
-			break;
-		default:
-			return -EINVAL;
-		}
+		/*
+		 * NOTE: This CODEC samples on the falling edge of BCLK in
+		 * DSP mode; this is inverted compared to what most DAIs
+		 * expect, so we invert for this mode.
+		 */
+		iface_reg2 ^= AIC31XX_BCLKINV_MASK;
 		iface_reg1 |= (AIC31XX_DSP_MODE <<
 			       AIC31XX_IFACE1_DATATYPE_SHIFT);
 		break;
@@ -981,8 +1005,9 @@ static int aic31xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
 	dev_dbg(codec->dev, "## %s: clk_id = %d, freq = %d, dir = %d\n",
 		__func__, clk_id, freq, dir);
 
-	for (i = 1; freq/i > 20000000 && i < 8; i++)
-		;
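+	/* Find the smallest P divider (1..7) that brings MCLK/P to 20 MHz or below */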
+	for (i = 1; i < 8; i++)
+		if (freq / i <= 20000000)
+			break;
 	if (freq/i > 20000000) {
 		dev_err(aic31xx->dev, "%s: Too high mclk frequency %u\n",
 			__func__, freq);
@@ -990,9 +1015,9 @@ static int aic31xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
 	}
 	aic31xx->p_div = i;
 
-	for (i = 0; i < ARRAY_SIZE(aic31xx_divs) &&
-		     aic31xx_divs[i].mclk_p != freq/aic31xx->p_div; i++)
-		;
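+	/* Look up a predefined clock configuration matching MCLK/P */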
+	for (i = 0; i < ARRAY_SIZE(aic31xx_divs); i++)
+		if (aic31xx_divs[i].mclk_p == freq / aic31xx->p_div)
+			break;
 	if (i == ARRAY_SIZE(aic31xx_divs)) {
 		dev_err(aic31xx->dev, "%s: Unsupported frequency %d\n",
 			__func__, freq);
@@ -1004,6 +1029,7 @@ static int aic31xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
 			    clk_id << AIC31XX_PLL_CLKIN_SHIFT);
 
 	aic31xx->sysclk = freq;
+
 	return 0;
 }
 
@@ -1019,8 +1045,8 @@ static int aic31xx_regulator_event(struct notifier_block *nb,
 		 * Put codec to reset and as at least one of the
 		 * supplies was disabled.
 		 */
-		if (gpio_is_valid(aic31xx->pdata.gpio_reset))
-			gpio_set_value(aic31xx->pdata.gpio_reset, 0);
+		if (aic31xx->gpio_reset)
+			gpiod_set_value(aic31xx->gpio_reset, 1);
 
 		regcache_mark_dirty(aic31xx->regmap);
 		dev_dbg(aic31xx->dev, "## %s: DISABLE received\n", __func__);
@@ -1029,6 +1055,22 @@ static int aic31xx_regulator_event(struct notifier_block *nb,
 	return 0;
 }
 
+static int aic31xx_reset(struct aic31xx_priv *aic31xx)
+{
+	int ret = 0;
+
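+	/*
+	 * Pulse the reset GPIO when one is wired up, otherwise fall back to
+	 * the software reset register.
+	 */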
+	if (aic31xx->gpio_reset) {
+		gpiod_set_value(aic31xx->gpio_reset, 1);
+		ndelay(10); /* At least 10ns */
+		gpiod_set_value(aic31xx->gpio_reset, 0);
+	} else {
+		ret = regmap_write(aic31xx->regmap, AIC31XX_RESET, 1);
+	}
+	mdelay(1); /* At least 1ms */
+
+	return ret;
+}
+
 static void aic31xx_clk_on(struct snd_soc_codec *codec)
 {
 	struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
@@ -1065,20 +1107,22 @@ static void aic31xx_clk_off(struct snd_soc_codec *codec)
 static int aic31xx_power_on(struct snd_soc_codec *codec)
 {
 	struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
-	int ret = 0;
+	int ret;
 
 	ret = regulator_bulk_enable(ARRAY_SIZE(aic31xx->supplies),
 				    aic31xx->supplies);
 	if (ret)
 		return ret;
 
-	if (gpio_is_valid(aic31xx->pdata.gpio_reset)) {
-		gpio_set_value(aic31xx->pdata.gpio_reset, 1);
-		udelay(100);
-	}
 	regcache_cache_only(aic31xx->regmap, false);
+
+	/* Reset device registers for a consistent power-on like state */
+	ret = aic31xx_reset(aic31xx);
+	if (ret < 0)
+		dev_err(aic31xx->dev, "Could not reset device: %d\n", ret);
+
 	ret = regcache_sync(aic31xx->regmap);
-	if (ret != 0) {
+	if (ret) {
 		dev_err(codec->dev,
 			"Failed to restore cache: %d\n", ret);
 		regcache_cache_only(aic31xx->regmap, true);
@@ -1086,19 +1130,17 @@ static int aic31xx_power_on(struct snd_soc_codec *codec)
 				       aic31xx->supplies);
 		return ret;
 	}
+
 	return 0;
 }
 
-static int aic31xx_power_off(struct snd_soc_codec *codec)
+static void aic31xx_power_off(struct snd_soc_codec *codec)
 {
 	struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
-	int ret = 0;
 
 	regcache_cache_only(aic31xx->regmap, true);
-	ret = regulator_bulk_disable(ARRAY_SIZE(aic31xx->supplies),
-				     aic31xx->supplies);
-
-	return ret;
+	regulator_bulk_disable(ARRAY_SIZE(aic31xx->supplies),
+			       aic31xx->supplies);
 }
 
 static int aic31xx_set_bias_level(struct snd_soc_codec *codec,
@@ -1137,14 +1179,11 @@ static int aic31xx_set_bias_level(struct snd_soc_codec *codec,
 
 static int aic31xx_codec_probe(struct snd_soc_codec *codec)
 {
-	int ret = 0;
 	struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
-	int i;
+	int i, ret;
 
 	dev_dbg(aic31xx->dev, "## %s\n", __func__);
 
-	aic31xx = snd_soc_codec_get_drvdata(codec);
-
 	aic31xx->codec = codec;
 
 	for (i = 0; i < ARRAY_SIZE(aic31xx->supplies); i++) {
@@ -1169,8 +1208,10 @@ static int aic31xx_codec_probe(struct snd_soc_codec *codec)
 		return ret;
 
 	ret = aic31xx_add_widgets(codec);
+	if (ret)
+		return ret;
 
-	return ret;
+	return 0;
 }
 
 static int aic31xx_codec_remove(struct snd_soc_codec *codec)
@@ -1258,89 +1299,31 @@ static const struct of_device_id tlv320aic31xx_of_match[] = {
 	{},
 };
 MODULE_DEVICE_TABLE(of, tlv320aic31xx_of_match);
-
-static void aic31xx_pdata_from_of(struct aic31xx_priv *aic31xx)
-{
-	struct device_node *np = aic31xx->dev->of_node;
-	unsigned int value = MICBIAS_2_0V;
-	int ret;
-
-	of_property_read_u32(np, "ai31xx-micbias-vg", &value);
-	switch (value) {
-	case MICBIAS_2_0V:
-	case MICBIAS_2_5V:
-	case MICBIAS_AVDDV:
-		aic31xx->pdata.micbias_vg = value;
-		break;
-	default:
-		dev_err(aic31xx->dev,
-			"Bad ai31xx-micbias-vg value %d DT\n",
-			value);
-		aic31xx->pdata.micbias_vg = MICBIAS_2_0V;
-	}
-
-	ret = of_get_named_gpio(np, "gpio-reset", 0);
-	if (ret > 0)
-		aic31xx->pdata.gpio_reset = ret;
-}
-#else /* CONFIG_OF */
-static void aic31xx_pdata_from_of(struct aic31xx_priv *aic31xx)
-{
-}
 #endif /* CONFIG_OF */
 
-static int aic31xx_device_init(struct aic31xx_priv *aic31xx)
-{
-	int ret, i;
-
-	dev_set_drvdata(aic31xx->dev, aic31xx);
-
-	if (dev_get_platdata(aic31xx->dev))
-		memcpy(&aic31xx->pdata, dev_get_platdata(aic31xx->dev),
-		       sizeof(aic31xx->pdata));
-	else if (aic31xx->dev->of_node)
-		aic31xx_pdata_from_of(aic31xx);
-
-	if (aic31xx->pdata.gpio_reset) {
-		ret = devm_gpio_request_one(aic31xx->dev,
-					    aic31xx->pdata.gpio_reset,
-					    GPIOF_OUT_INIT_HIGH,
-					    "aic31xx-reset-pin");
-		if (ret < 0) {
-			dev_err(aic31xx->dev, "not able to acquire gpio\n");
-			return ret;
-		}
-	}
-
-	for (i = 0; i < ARRAY_SIZE(aic31xx->supplies); i++)
-		aic31xx->supplies[i].supply = aic31xx_supply_names[i];
-
-	ret = devm_regulator_bulk_get(aic31xx->dev,
-				      ARRAY_SIZE(aic31xx->supplies),
-				      aic31xx->supplies);
-	if (ret != 0)
-		dev_err(aic31xx->dev, "Failed to request supplies: %d\n", ret);
-
-	return ret;
-}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id aic31xx_acpi_match[] = {
+	{ "10TI3100", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, aic31xx_acpi_match);
+#endif
 
 static int aic31xx_i2c_probe(struct i2c_client *i2c,
 			     const struct i2c_device_id *id)
 {
 	struct aic31xx_priv *aic31xx;
-	int ret;
-	const struct regmap_config *regmap_config;
+	unsigned int micbias_value = MICBIAS_2_0V;
+	int i, ret;
 
 	dev_dbg(&i2c->dev, "## %s: %s codec_type = %d\n", __func__,
-		id->name, (int) id->driver_data);
-
-	regmap_config = &aic31xx_i2c_regmap;
+		id->name, (int)id->driver_data);
 
 	aic31xx = devm_kzalloc(&i2c->dev, sizeof(*aic31xx), GFP_KERNEL);
-	if (aic31xx == NULL)
+	if (!aic31xx)
 		return -ENOMEM;
 
-	aic31xx->regmap = devm_regmap_init_i2c(i2c, regmap_config);
+	aic31xx->regmap = devm_regmap_init_i2c(i2c, &aic31xx_i2c_regmap);
 	if (IS_ERR(aic31xx->regmap)) {
 		ret = PTR_ERR(aic31xx->regmap);
 		dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
@@ -1349,13 +1332,49 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
 	}
 	aic31xx->dev = &i2c->dev;
 
-	aic31xx->pdata.codec_type = id->driver_data;
+	aic31xx->codec_type = id->driver_data;
 
-	ret = aic31xx_device_init(aic31xx);
-	if (ret)
+	dev_set_drvdata(aic31xx->dev, aic31xx);
+
+	fwnode_property_read_u32(aic31xx->dev->fwnode, "ai31xx-micbias-vg",
+				 &micbias_value);
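+	/* Keep the 2.0V default unless firmware provides a valid micbias value */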
+	switch (micbias_value) {
+	case MICBIAS_2_0V:
+	case MICBIAS_2_5V:
+	case MICBIAS_AVDDV:
+		aic31xx->micbias_vg = micbias_value;
+		break;
+	default:
+		dev_err(aic31xx->dev, "Bad ai31xx-micbias-vg value %d\n",
+			micbias_value);
+		aic31xx->micbias_vg = MICBIAS_2_0V;
+	}
+
+	if (dev_get_platdata(aic31xx->dev)) {
+		memcpy(&aic31xx->pdata, dev_get_platdata(aic31xx->dev),
+		       sizeof(aic31xx->pdata));
+		aic31xx->codec_type = aic31xx->pdata.codec_type;
+		aic31xx->micbias_vg = aic31xx->pdata.micbias_vg;
+	}
+
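+	/*
+	 * The reset GPIO is optional; without it aic31xx_reset() falls back
+	 * to the software reset register.
+	 */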
+	aic31xx->gpio_reset = devm_gpiod_get_optional(aic31xx->dev, "reset",
+						      GPIOD_OUT_LOW);
+	if (IS_ERR(aic31xx->gpio_reset)) {
+		dev_err(aic31xx->dev, "not able to acquire gpio\n");
+		return PTR_ERR(aic31xx->gpio_reset);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(aic31xx->supplies); i++)
+		aic31xx->supplies[i].supply = aic31xx_supply_names[i];
+
+	ret = devm_regulator_bulk_get(aic31xx->dev,
+				      ARRAY_SIZE(aic31xx->supplies),
+				      aic31xx->supplies);
+	if (ret) {
+		dev_err(aic31xx->dev, "Failed to request supplies: %d\n", ret);
 		return ret;
+	}
 
-	if (aic31xx->pdata.codec_type & DAC31XX_BIT)
+	if (aic31xx->codec_type & DAC31XX_BIT)
 		return snd_soc_register_codec(&i2c->dev,
 				&soc_codec_driver_aic31xx,
 				dac31xx_dai_driver,
@@ -1386,14 +1405,6 @@ static const struct i2c_device_id aic31xx_i2c_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, aic31xx_i2c_id);
 
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id aic31xx_acpi_match[] = {
-	{ "10TI3100", 0 },
-	{ }
-};
-MODULE_DEVICE_TABLE(acpi, aic31xx_acpi_match);
-#endif
-
 static struct i2c_driver aic31xx_i2c_driver = {
 	.driver = {
 		.name	= "tlv320aic31xx-codec",
@@ -1404,9 +1415,8 @@ static struct i2c_driver aic31xx_i2c_driver = {
 	.remove		= aic31xx_i2c_remove,
 	.id_table	= aic31xx_i2c_id,
 };
-
 module_i2c_driver(aic31xx_i2c_driver);
 
-MODULE_DESCRIPTION("ASoC TLV320AIC3111 codec driver");
-MODULE_AUTHOR("Jyri Sarha");
-MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
+MODULE_DESCRIPTION("ASoC TLV320AIC31xx CODEC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h
index 1ff3edb7bbb6b28eb45c99b7b14baeadb46651ec..15ac7cba86fe83a2c4d9363bb9c080b73cfa0c52 100644
--- a/sound/soc/codecs/tlv320aic31xx.h
+++ b/sound/soc/codecs/tlv320aic31xx.h
@@ -1,36 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * ALSA SoC TLV320AIC31XX codec driver
- *
- * Copyright (C) 2013 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ * ALSA SoC TLV320AIC31xx CODEC Driver Definitions
  *
+ * Copyright (C) 2014-2017 Texas Instruments Incorporated - http://www.ti.com/
  */
+
 #ifndef _TLV320AIC31XX_H
 #define _TLV320AIC31XX_H
 
 #define AIC31XX_RATES	SNDRV_PCM_RATE_8000_192000
 
-#define AIC31XX_FORMATS	(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE \
-			 | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE \
-			 | SNDRV_PCM_FMTBIT_S32_LE)
-
+#define AIC31XX_FORMATS	(SNDRV_PCM_FMTBIT_S16_LE | \
+			 SNDRV_PCM_FMTBIT_S20_3LE | \
+			 SNDRV_PCM_FMTBIT_S24_3LE | \
+			 SNDRV_PCM_FMTBIT_S24_LE | \
+			 SNDRV_PCM_FMTBIT_S32_LE)
 
-#define AIC31XX_STEREO_CLASS_D_BIT	0x1
-#define AIC31XX_MINIDSP_BIT		0x2
-#define DAC31XX_BIT			0x4
+#define AIC31XX_STEREO_CLASS_D_BIT	BIT(1)
+#define AIC31XX_MINIDSP_BIT		BIT(2)
+#define DAC31XX_BIT			BIT(3)
 
 enum aic31xx_type {
 	AIC3100	= 0,
 	AIC3110 = AIC31XX_STEREO_CLASS_D_BIT,
 	AIC3120 = AIC31XX_MINIDSP_BIT,
-	AIC3111 = (AIC31XX_STEREO_CLASS_D_BIT | AIC31XX_MINIDSP_BIT),
+	AIC3111 = AIC31XX_STEREO_CLASS_D_BIT | AIC31XX_MINIDSP_BIT,
 	DAC3100 = DAC31XX_BIT,
 	DAC3101 = DAC31XX_BIT | AIC31XX_STEREO_CLASS_D_BIT,
 };
@@ -43,222 +37,167 @@ struct aic31xx_pdata {
 
 #define AIC31XX_REG(page, reg)	((page * 128) + reg)
 
-/* Page Control Register */
-#define AIC31XX_PAGECTL		AIC31XX_REG(0, 0)
+#define AIC31XX_PAGECTL		AIC31XX_REG(0, 0) /* Page Control Register */
 
 /* Page 0 Registers */
-/* Software reset register */
-#define AIC31XX_RESET		AIC31XX_REG(0, 1)
-/* OT FLAG register */
-#define AIC31XX_OT_FLAG		AIC31XX_REG(0, 3)
-/* Clock clock Gen muxing, Multiplexers*/
-#define AIC31XX_CLKMUX		AIC31XX_REG(0, 4)
-/* PLL P and R-VAL register */
-#define AIC31XX_PLLPR		AIC31XX_REG(0, 5)
-/* PLL J-VAL register */
-#define AIC31XX_PLLJ		AIC31XX_REG(0, 6)
-/* PLL D-VAL MSB register */
-#define AIC31XX_PLLDMSB		AIC31XX_REG(0, 7)
-/* PLL D-VAL LSB register */
-#define AIC31XX_PLLDLSB		AIC31XX_REG(0, 8)
-/* DAC NDAC_VAL register*/
-#define AIC31XX_NDAC		AIC31XX_REG(0, 11)
-/* DAC MDAC_VAL register */
-#define AIC31XX_MDAC		AIC31XX_REG(0, 12)
-/* DAC OSR setting register 1, MSB value */
-#define AIC31XX_DOSRMSB		AIC31XX_REG(0, 13)
-/* DAC OSR setting register 2, LSB value */
-#define AIC31XX_DOSRLSB		AIC31XX_REG(0, 14)
+#define AIC31XX_RESET		AIC31XX_REG(0, 1) /* Software reset register */
+#define AIC31XX_OT_FLAG		AIC31XX_REG(0, 3) /* OT FLAG register */
+#define AIC31XX_CLKMUX		AIC31XX_REG(0, 4) /* Clock Gen muxing, Multiplexers */
+#define AIC31XX_PLLPR		AIC31XX_REG(0, 5) /* PLL P and R-VAL register */
+#define AIC31XX_PLLJ		AIC31XX_REG(0, 6) /* PLL J-VAL register */
+#define AIC31XX_PLLDMSB		AIC31XX_REG(0, 7) /* PLL D-VAL MSB register */
+#define AIC31XX_PLLDLSB		AIC31XX_REG(0, 8) /* PLL D-VAL LSB register */
+#define AIC31XX_NDAC		AIC31XX_REG(0, 11) /* DAC NDAC_VAL register*/
+#define AIC31XX_MDAC		AIC31XX_REG(0, 12) /* DAC MDAC_VAL register */
+#define AIC31XX_DOSRMSB		AIC31XX_REG(0, 13) /* DAC OSR setting register 1, MSB value */
+#define AIC31XX_DOSRLSB		AIC31XX_REG(0, 14) /* DAC OSR setting register 2, LSB value */
 #define AIC31XX_MINI_DSP_INPOL	AIC31XX_REG(0, 16)
-/* Clock setting register 8, PLL */
-#define AIC31XX_NADC		AIC31XX_REG(0, 18)
-/* Clock setting register 9, PLL */
-#define AIC31XX_MADC		AIC31XX_REG(0, 19)
-/* ADC Oversampling (AOSR) Register */
-#define AIC31XX_AOSR		AIC31XX_REG(0, 20)
-/* Clock setting register 9, Multiplexers */
-#define AIC31XX_CLKOUTMUX	AIC31XX_REG(0, 25)
-/* Clock setting register 10, CLOCKOUT M divider value */
-#define AIC31XX_CLKOUTMVAL	AIC31XX_REG(0, 26)
-/* Audio Interface Setting Register 1 */
-#define AIC31XX_IFACE1		AIC31XX_REG(0, 27)
-/* Audio Data Slot Offset Programming */
-#define AIC31XX_DATA_OFFSET	AIC31XX_REG(0, 28)
-/* Audio Interface Setting Register 2 */
-#define AIC31XX_IFACE2		AIC31XX_REG(0, 29)
-/* Clock setting register 11, BCLK N Divider */
-#define AIC31XX_BCLKN		AIC31XX_REG(0, 30)
-/* Audio Interface Setting Register 3, Secondary Audio Interface */
-#define AIC31XX_IFACESEC1	AIC31XX_REG(0, 31)
-/* Audio Interface Setting Register 4 */
-#define AIC31XX_IFACESEC2	AIC31XX_REG(0, 32)
-/* Audio Interface Setting Register 5 */
-#define AIC31XX_IFACESEC3	AIC31XX_REG(0, 33)
-/* I2C Bus Condition */
-#define AIC31XX_I2C		AIC31XX_REG(0, 34)
-/* ADC FLAG */
-#define AIC31XX_ADCFLAG		AIC31XX_REG(0, 36)
-/* DAC Flag Registers */
-#define AIC31XX_DACFLAG1	AIC31XX_REG(0, 37)
+#define AIC31XX_NADC		AIC31XX_REG(0, 18) /* Clock setting register 8, PLL */
+#define AIC31XX_MADC		AIC31XX_REG(0, 19) /* Clock setting register 9, PLL */
+#define AIC31XX_AOSR		AIC31XX_REG(0, 20) /* ADC Oversampling (AOSR) Register */
+#define AIC31XX_CLKOUTMUX	AIC31XX_REG(0, 25) /* Clock setting register 9, Multiplexers */
+#define AIC31XX_CLKOUTMVAL	AIC31XX_REG(0, 26) /* Clock setting register 10, CLOCKOUT M divider value */
+#define AIC31XX_IFACE1		AIC31XX_REG(0, 27) /* Audio Interface Setting Register 1 */
+#define AIC31XX_DATA_OFFSET	AIC31XX_REG(0, 28) /* Audio Data Slot Offset Programming */
+#define AIC31XX_IFACE2		AIC31XX_REG(0, 29) /* Audio Interface Setting Register 2 */
+#define AIC31XX_BCLKN		AIC31XX_REG(0, 30) /* Clock setting register 11, BCLK N Divider */
+#define AIC31XX_IFACESEC1	AIC31XX_REG(0, 31) /* Audio Interface Setting Register 3, Secondary Audio Interface */
+#define AIC31XX_IFACESEC2	AIC31XX_REG(0, 32) /* Audio Interface Setting Register 4 */
+#define AIC31XX_IFACESEC3	AIC31XX_REG(0, 33) /* Audio Interface Setting Register 5 */
+#define AIC31XX_I2C		AIC31XX_REG(0, 34) /* I2C Bus Condition */
+#define AIC31XX_ADCFLAG		AIC31XX_REG(0, 36) /* ADC FLAG */
+#define AIC31XX_DACFLAG1	AIC31XX_REG(0, 37) /* DAC Flag Registers */
 #define AIC31XX_DACFLAG2	AIC31XX_REG(0, 38)
-/* Sticky Interrupt flag (overflow) */
-#define AIC31XX_OFFLAG		AIC31XX_REG(0, 39)
-/* Sticy DAC Interrupt flags */
-#define AIC31XX_INTRDACFLAG	AIC31XX_REG(0, 44)
-/* Sticy ADC Interrupt flags */
-#define AIC31XX_INTRADCFLAG	AIC31XX_REG(0, 45)
-/* DAC Interrupt flags 2 */
-#define AIC31XX_INTRDACFLAG2	AIC31XX_REG(0, 46)
-/* ADC Interrupt flags 2 */
-#define AIC31XX_INTRADCFLAG2	AIC31XX_REG(0, 47)
-/* INT1 interrupt control */
-#define AIC31XX_INT1CTRL	AIC31XX_REG(0, 48)
-/* INT2 interrupt control */
-#define AIC31XX_INT2CTRL	AIC31XX_REG(0, 49)
-/* GPIO1 control */
-#define AIC31XX_GPIO1		AIC31XX_REG(0, 51)
-
+#define AIC31XX_OFFLAG		AIC31XX_REG(0, 39) /* Sticky Interrupt flag (overflow) */
+#define AIC31XX_INTRDACFLAG	AIC31XX_REG(0, 44) /* Sticky DAC Interrupt flags */
+#define AIC31XX_INTRADCFLAG	AIC31XX_REG(0, 45) /* Sticky ADC Interrupt flags */
+#define AIC31XX_INTRDACFLAG2	AIC31XX_REG(0, 46) /* DAC Interrupt flags 2 */
+#define AIC31XX_INTRADCFLAG2	AIC31XX_REG(0, 47) /* ADC Interrupt flags 2 */
+#define AIC31XX_INT1CTRL	AIC31XX_REG(0, 48) /* INT1 interrupt control */
+#define AIC31XX_INT2CTRL	AIC31XX_REG(0, 49) /* INT2 interrupt control */
+#define AIC31XX_GPIO1		AIC31XX_REG(0, 51) /* GPIO1 control */
 #define AIC31XX_DACPRB		AIC31XX_REG(0, 60)
-/* ADC Instruction Set Register */
-#define AIC31XX_ADCPRB		AIC31XX_REG(0, 61)
-/* DAC channel setup register */
-#define AIC31XX_DACSETUP	AIC31XX_REG(0, 63)
-/* DAC Mute and volume control register */
-#define AIC31XX_DACMUTE		AIC31XX_REG(0, 64)
-/* Left DAC channel digital volume control */
-#define AIC31XX_LDACVOL		AIC31XX_REG(0, 65)
-/* Right DAC channel digital volume control */
-#define AIC31XX_RDACVOL		AIC31XX_REG(0, 66)
-/* Headset detection */
-#define AIC31XX_HSDETECT	AIC31XX_REG(0, 67)
-/* ADC Digital Mic */
-#define AIC31XX_ADCSETUP	AIC31XX_REG(0, 81)
-/* ADC Digital Volume Control Fine Adjust */
-#define AIC31XX_ADCFGA		AIC31XX_REG(0, 82)
-/* ADC Digital Volume Control Coarse Adjust */
-#define AIC31XX_ADCVOL		AIC31XX_REG(0, 83)
-
+#define AIC31XX_ADCPRB		AIC31XX_REG(0, 61) /* ADC Instruction Set Register */
+#define AIC31XX_DACSETUP	AIC31XX_REG(0, 63) /* DAC channel setup register */
+#define AIC31XX_DACMUTE		AIC31XX_REG(0, 64) /* DAC Mute and volume control register */
+#define AIC31XX_LDACVOL		AIC31XX_REG(0, 65) /* Left DAC channel digital volume control */
+#define AIC31XX_RDACVOL		AIC31XX_REG(0, 66) /* Right DAC channel digital volume control */
+#define AIC31XX_HSDETECT	AIC31XX_REG(0, 67) /* Headset detection */
+#define AIC31XX_ADCSETUP	AIC31XX_REG(0, 81) /* ADC Digital Mic */
+#define AIC31XX_ADCFGA		AIC31XX_REG(0, 82) /* ADC Digital Volume Control Fine Adjust */
+#define AIC31XX_ADCVOL		AIC31XX_REG(0, 83) /* ADC Digital Volume Control Coarse Adjust */
 
 /* Page 1 Registers */
-/* Headphone drivers */
-#define AIC31XX_HPDRIVER	AIC31XX_REG(1, 31)
-/* Class-D Speakear Amplifier */
-#define AIC31XX_SPKAMP		AIC31XX_REG(1, 32)
-/* HP Output Drivers POP Removal Settings */
-#define AIC31XX_HPPOP		AIC31XX_REG(1, 33)
-/* Output Driver PGA Ramp-Down Period Control */
-#define AIC31XX_SPPGARAMP	AIC31XX_REG(1, 34)
-/* DAC_L and DAC_R Output Mixer Routing */
-#define AIC31XX_DACMIXERROUTE	AIC31XX_REG(1, 35)
-/* Left Analog Vol to HPL */
-#define AIC31XX_LANALOGHPL	AIC31XX_REG(1, 36)
-/* Right Analog Vol to HPR */
-#define AIC31XX_RANALOGHPR	AIC31XX_REG(1, 37)
-/* Left Analog Vol to SPL */
-#define AIC31XX_LANALOGSPL	AIC31XX_REG(1, 38)
-/* Right Analog Vol to SPR */
-#define AIC31XX_RANALOGSPR	AIC31XX_REG(1, 39)
-/* HPL Driver */
-#define AIC31XX_HPLGAIN		AIC31XX_REG(1, 40)
-/* HPR Driver */
-#define AIC31XX_HPRGAIN		AIC31XX_REG(1, 41)
-/* SPL Driver */
-#define AIC31XX_SPLGAIN		AIC31XX_REG(1, 42)
-/* SPR Driver */
-#define AIC31XX_SPRGAIN		AIC31XX_REG(1, 43)
-/* HP Driver Control */
-#define AIC31XX_HPCONTROL	AIC31XX_REG(1, 44)
-/* MIC Bias Control */
-#define AIC31XX_MICBIAS		AIC31XX_REG(1, 46)
-/* MIC PGA*/
-#define AIC31XX_MICPGA		AIC31XX_REG(1, 47)
-/* Delta-Sigma Mono ADC Channel Fine-Gain Input Selection for P-Terminal */
-#define AIC31XX_MICPGAPI	AIC31XX_REG(1, 48)
-/* ADC Input Selection for M-Terminal */
-#define AIC31XX_MICPGAMI	AIC31XX_REG(1, 49)
-/* Input CM Settings */
-#define AIC31XX_MICPGACM	AIC31XX_REG(1, 50)
-
-/* Bits, masks and shifts */
+#define AIC31XX_HPDRIVER	AIC31XX_REG(1, 31) /* Headphone drivers */
+#define AIC31XX_SPKAMP		AIC31XX_REG(1, 32) /* Class-D Speaker Amplifier */
+#define AIC31XX_HPPOP		AIC31XX_REG(1, 33) /* HP Output Drivers POP Removal Settings */
+#define AIC31XX_SPPGARAMP	AIC31XX_REG(1, 34) /* Output Driver PGA Ramp-Down Period Control */
+#define AIC31XX_DACMIXERROUTE	AIC31XX_REG(1, 35) /* DAC_L and DAC_R Output Mixer Routing */
+#define AIC31XX_LANALOGHPL	AIC31XX_REG(1, 36) /* Left Analog Vol to HPL */
+#define AIC31XX_RANALOGHPR	AIC31XX_REG(1, 37) /* Right Analog Vol to HPR */
+#define AIC31XX_LANALOGSPL	AIC31XX_REG(1, 38) /* Left Analog Vol to SPL */
+#define AIC31XX_RANALOGSPR	AIC31XX_REG(1, 39) /* Right Analog Vol to SPR */
+#define AIC31XX_HPLGAIN		AIC31XX_REG(1, 40) /* HPL Driver */
+#define AIC31XX_HPRGAIN		AIC31XX_REG(1, 41) /* HPR Driver */
+#define AIC31XX_SPLGAIN		AIC31XX_REG(1, 42) /* SPL Driver */
+#define AIC31XX_SPRGAIN		AIC31XX_REG(1, 43) /* SPR Driver */
+#define AIC31XX_HPCONTROL	AIC31XX_REG(1, 44) /* HP Driver Control */
+#define AIC31XX_MICBIAS		AIC31XX_REG(1, 46) /* MIC Bias Control */
+#define AIC31XX_MICPGA		AIC31XX_REG(1, 47) /* MIC PGA */
+#define AIC31XX_MICPGAPI	AIC31XX_REG(1, 48) /* Delta-Sigma Mono ADC Channel Fine-Gain Input Selection for P-Terminal */
+#define AIC31XX_MICPGAMI	AIC31XX_REG(1, 49) /* ADC Input Selection for M-Terminal */
+#define AIC31XX_MICPGACM	AIC31XX_REG(1, 50) /* Input CM Settings */
+
+/* Bits, masks, and shifts */
 
 /* AIC31XX_CLKMUX */
-#define AIC31XX_PLL_CLKIN_MASK			0x0c
-#define AIC31XX_PLL_CLKIN_SHIFT			2
-#define AIC31XX_PLL_CLKIN_MCLK			0
-#define AIC31XX_CODEC_CLKIN_MASK		0x03
-#define AIC31XX_CODEC_CLKIN_SHIFT		0
-#define AIC31XX_CODEC_CLKIN_PLL			3
-#define AIC31XX_CODEC_CLKIN_BCLK		1
-
-/* AIC31XX_PLLPR, AIC31XX_NDAC, AIC31XX_MDAC, AIC31XX_NADC, AIC31XX_MADC,
-   AIC31XX_BCLKN */
-#define AIC31XX_PLL_MASK		0x7f
-#define AIC31XX_PM_MASK			0x80
+#define AIC31XX_PLL_CLKIN_MASK		GENMASK(3, 2)
+#define AIC31XX_PLL_CLKIN_SHIFT		(2)
+#define AIC31XX_PLL_CLKIN_MCLK		0x00
+#define AIC31XX_PLL_CLKIN_BCKL		0x01
+#define AIC31XX_PLL_CLKIN_GPIO1		0x02
+#define AIC31XX_PLL_CLKIN_DIN		0x03
+#define AIC31XX_CODEC_CLKIN_MASK	GENMASK(1, 0)
+#define AIC31XX_CODEC_CLKIN_SHIFT	(0)
+#define AIC31XX_CODEC_CLKIN_MCLK	0x00
+#define AIC31XX_CODEC_CLKIN_BCLK	0x01
+#define AIC31XX_CODEC_CLKIN_GPIO1	0x02
+#define AIC31XX_CODEC_CLKIN_PLL		0x03
+
+/* AIC31XX_PLLPR */
+/* AIC31XX_NDAC */
+/* AIC31XX_MDAC */
+/* AIC31XX_NADC */
+/* AIC31XX_MADC */
+/* AIC31XX_BCLKN */
+#define AIC31XX_PLL_MASK		GENMASK(6, 0)
+#define AIC31XX_PM_MASK			BIT(7)
 
 /* AIC31XX_IFACE1 */
-#define AIC31XX_WORD_LEN_16BITS		0x00
-#define AIC31XX_WORD_LEN_20BITS		0x01
-#define AIC31XX_WORD_LEN_24BITS		0x02
-#define AIC31XX_WORD_LEN_32BITS		0x03
-#define AIC31XX_IFACE1_DATALEN_MASK	0x30
-#define AIC31XX_IFACE1_DATALEN_SHIFT	(4)
-#define AIC31XX_IFACE1_DATATYPE_MASK	0xC0
+#define AIC31XX_IFACE1_DATATYPE_MASK	GENMASK(7, 6)
 #define AIC31XX_IFACE1_DATATYPE_SHIFT	(6)
 #define AIC31XX_I2S_MODE		0x00
 #define AIC31XX_DSP_MODE		0x01
 #define AIC31XX_RIGHT_JUSTIFIED_MODE	0x02
 #define AIC31XX_LEFT_JUSTIFIED_MODE	0x03
-#define AIC31XX_IFACE1_MASTER_MASK	0x0C
-#define AIC31XX_BCLK_MASTER		0x08
-#define AIC31XX_WCLK_MASTER		0x04
+#define AIC31XX_IFACE1_DATALEN_MASK	GENMASK(5, 4)
+#define AIC31XX_IFACE1_DATALEN_SHIFT	(4)
+#define AIC31XX_WORD_LEN_16BITS		0x00
+#define AIC31XX_WORD_LEN_20BITS		0x01
+#define AIC31XX_WORD_LEN_24BITS		0x02
+#define AIC31XX_WORD_LEN_32BITS		0x03
+#define AIC31XX_IFACE1_MASTER_MASK	GENMASK(3, 2)
+#define AIC31XX_BCLK_MASTER		BIT(2)
+#define AIC31XX_WCLK_MASTER		BIT(3)
 
 /* AIC31XX_DATA_OFFSET */
-#define AIC31XX_DATA_OFFSET_MASK	0xFF
+#define AIC31XX_DATA_OFFSET_MASK	GENMASK(7, 0)
 
 /* AIC31XX_IFACE2 */
-#define AIC31XX_BCLKINV_MASK		0x08
-#define AIC31XX_BDIVCLK_MASK		0x03
+#define AIC31XX_BCLKINV_MASK		BIT(3)
+#define AIC31XX_BDIVCLK_MASK		GENMASK(1, 0)
 #define AIC31XX_DAC2BCLK		0x00
 #define AIC31XX_DACMOD2BCLK		0x01
 #define AIC31XX_ADC2BCLK		0x02
 #define AIC31XX_ADCMOD2BCLK		0x03
 
 /* AIC31XX_ADCFLAG */
-#define AIC31XX_ADCPWRSTATUS_MASK		0x40
+#define AIC31XX_ADCPWRSTATUS_MASK	BIT(6)
 
 /* AIC31XX_DACFLAG1 */
-#define AIC31XX_LDACPWRSTATUS_MASK		0x80
-#define AIC31XX_RDACPWRSTATUS_MASK		0x08
-#define AIC31XX_HPLDRVPWRSTATUS_MASK		0x20
-#define AIC31XX_HPRDRVPWRSTATUS_MASK		0x02
-#define AIC31XX_SPLDRVPWRSTATUS_MASK		0x10
-#define AIC31XX_SPRDRVPWRSTATUS_MASK		0x01
+#define AIC31XX_LDACPWRSTATUS_MASK	BIT(7)
+#define AIC31XX_HPLDRVPWRSTATUS_MASK	BIT(5)
+#define AIC31XX_SPLDRVPWRSTATUS_MASK	BIT(4)
+#define AIC31XX_RDACPWRSTATUS_MASK	BIT(3)
+#define AIC31XX_HPRDRVPWRSTATUS_MASK	BIT(1)
+#define AIC31XX_SPRDRVPWRSTATUS_MASK	BIT(0)
 
 /* AIC31XX_INTRDACFLAG */
-#define AIC31XX_HPSCDETECT_MASK			0x80
-#define AIC31XX_BUTTONPRESS_MASK		0x20
-#define AIC31XX_HSPLUG_MASK			0x10
-#define AIC31XX_LDRCTHRES_MASK			0x08
-#define AIC31XX_RDRCTHRES_MASK			0x04
-#define AIC31XX_DACSINT_MASK			0x02
-#define AIC31XX_DACAINT_MASK			0x01
+#define AIC31XX_HPLSCDETECT		BIT(7)
+#define AIC31XX_HPRSCDETECT		BIT(6)
+#define AIC31XX_BUTTONPRESS		BIT(5)
+#define AIC31XX_HSPLUG			BIT(4)
+#define AIC31XX_LDRCTHRES		BIT(3)
+#define AIC31XX_RDRCTHRES		BIT(2)
+#define AIC31XX_DACSINT			BIT(1)
+#define AIC31XX_DACAINT			BIT(0)
 
 /* AIC31XX_INT1CTRL */
-#define AIC31XX_HSPLUGDET_MASK			0x80
-#define AIC31XX_BUTTONPRESSDET_MASK		0x40
-#define AIC31XX_DRCTHRES_MASK			0x20
-#define AIC31XX_AGCNOISE_MASK			0x10
-#define AIC31XX_OC_MASK				0x08
-#define AIC31XX_ENGINE_MASK			0x04
+#define AIC31XX_HSPLUGDET		BIT(7)
+#define AIC31XX_BUTTONPRESSDET		BIT(6)
+#define AIC31XX_DRCTHRES		BIT(5)
+#define AIC31XX_AGCNOISE		BIT(4)
+#define AIC31XX_SC			BIT(3)
+#define AIC31XX_ENGINE			BIT(2)
 
 /* AIC31XX_DACSETUP */
-#define AIC31XX_SOFTSTEP_MASK			0x03
+#define AIC31XX_SOFTSTEP_MASK		GENMASK(1, 0)
 
 /* AIC31XX_DACMUTE */
-#define AIC31XX_DACMUTE_MASK			0x0C
+#define AIC31XX_DACMUTE_MASK		GENMASK(3, 2)
 
 /* AIC31XX_MICBIAS */
-#define AIC31XX_MICBIAS_MASK			0x03
-#define AIC31XX_MICBIAS_SHIFT			0
+#define AIC31XX_MICBIAS_MASK		GENMASK(1, 0)
+#define AIC31XX_MICBIAS_SHIFT		0
 
 #endif	/* _TLV320AIC31XX_H */
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index e694f5f04eb90c6ef94e9fcb251ba450612ba5a4..fea019343c3bfd5656e172c14c6e48eb531c530a 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -281,34 +281,34 @@ static const struct snd_kcontrol_new aic32x4_snd_controls[] = {
 
 static const struct aic32x4_rate_divs aic32x4_divs[] = {
 	/* 8k rate */
-	{AIC32X4_FREQ_12000000, 8000, 1, 7, 6800, 768, 5, 3, 128, 5, 18, 24},
-	{AIC32X4_FREQ_24000000, 8000, 2, 7, 6800, 768, 15, 1, 64, 45, 4, 24},
-	{AIC32X4_FREQ_25000000, 8000, 2, 7, 3728, 768, 15, 1, 64, 45, 4, 24},
+	{12000000, 8000, 1, 7, 6800, 768, 5, 3, 128, 5, 18, 24},
+	{24000000, 8000, 2, 7, 6800, 768, 15, 1, 64, 45, 4, 24},
+	{25000000, 8000, 2, 7, 3728, 768, 15, 1, 64, 45, 4, 24},
 	/* 11.025k rate */
-	{AIC32X4_FREQ_12000000, 11025, 1, 7, 5264, 512, 8, 2, 128, 8, 8, 16},
-	{AIC32X4_FREQ_24000000, 11025, 2, 7, 5264, 512, 16, 1, 64, 32, 4, 16},
+	{12000000, 11025, 1, 7, 5264, 512, 8, 2, 128, 8, 8, 16},
+	{24000000, 11025, 2, 7, 5264, 512, 16, 1, 64, 32, 4, 16},
 	/* 16k rate */
-	{AIC32X4_FREQ_12000000, 16000, 1, 7, 6800, 384, 5, 3, 128, 5, 9, 12},
-	{AIC32X4_FREQ_24000000, 16000, 2, 7, 6800, 384, 15, 1, 64, 18, 5, 12},
-	{AIC32X4_FREQ_25000000, 16000, 2, 7, 3728, 384, 15, 1, 64, 18, 5, 12},
+	{12000000, 16000, 1, 7, 6800, 384, 5, 3, 128, 5, 9, 12},
+	{24000000, 16000, 2, 7, 6800, 384, 15, 1, 64, 18, 5, 12},
+	{25000000, 16000, 2, 7, 3728, 384, 15, 1, 64, 18, 5, 12},
 	/* 22.05k rate */
-	{AIC32X4_FREQ_12000000, 22050, 1, 7, 5264, 256, 4, 4, 128, 4, 8, 8},
-	{AIC32X4_FREQ_24000000, 22050, 2, 7, 5264, 256, 16, 1, 64, 16, 4, 8},
-	{AIC32X4_FREQ_25000000, 22050, 2, 7, 2253, 256, 16, 1, 64, 16, 4, 8},
+	{12000000, 22050, 1, 7, 5264, 256, 4, 4, 128, 4, 8, 8},
+	{24000000, 22050, 2, 7, 5264, 256, 16, 1, 64, 16, 4, 8},
+	{25000000, 22050, 2, 7, 2253, 256, 16, 1, 64, 16, 4, 8},
 	/* 32k rate */
-	{AIC32X4_FREQ_12000000, 32000, 1, 7, 1680, 192, 2, 7, 64, 2, 21, 6},
-	{AIC32X4_FREQ_24000000, 32000, 2, 7, 1680, 192, 7, 2, 64, 7, 6, 6},
+	{12000000, 32000, 1, 7, 1680, 192, 2, 7, 64, 2, 21, 6},
+	{24000000, 32000, 2, 7, 1680, 192, 7, 2, 64, 7, 6, 6},
 	/* 44.1k rate */
-	{AIC32X4_FREQ_12000000, 44100, 1, 7, 5264, 128, 2, 8, 128, 2, 8, 4},
-	{AIC32X4_FREQ_24000000, 44100, 2, 7, 5264, 128, 8, 2, 64, 8, 4, 4},
-	{AIC32X4_FREQ_25000000, 44100, 2, 7, 2253, 128, 8, 2, 64, 8, 4, 4},
+	{12000000, 44100, 1, 7, 5264, 128, 2, 8, 128, 2, 8, 4},
+	{24000000, 44100, 2, 7, 5264, 128, 8, 2, 64, 8, 4, 4},
+	{25000000, 44100, 2, 7, 2253, 128, 8, 2, 64, 8, 4, 4},
 	/* 48k rate */
-	{AIC32X4_FREQ_12000000, 48000, 1, 8, 1920, 128, 2, 8, 128, 2, 8, 4},
-	{AIC32X4_FREQ_24000000, 48000, 2, 8, 1920, 128, 8, 2, 64, 8, 4, 4},
-	{AIC32X4_FREQ_25000000, 48000, 2, 7, 8643, 128, 8, 2, 64, 8, 4, 4},
+	{12000000, 48000, 1, 8, 1920, 128, 2, 8, 128, 2, 8, 4},
+	{24000000, 48000, 2, 8, 1920, 128, 8, 2, 64, 8, 4, 4},
+	{25000000, 48000, 2, 7, 8643, 128, 8, 2, 64, 8, 4, 4},
 
 	/* 96k rate */
-	{AIC32X4_FREQ_25000000, 96000, 2, 7, 8643, 64, 4, 4, 64, 4, 4, 1},
+	{25000000, 96000, 2, 7, 8643, 64, 4, 4, 64, 4, 4, 1},
 };
 
 static const struct snd_kcontrol_new hpl_output_mixer_controls[] = {
@@ -601,9 +601,9 @@ static int aic32x4_set_dai_sysclk(struct snd_soc_dai *codec_dai,
 	struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
 
 	switch (freq) {
-	case AIC32X4_FREQ_12000000:
-	case AIC32X4_FREQ_24000000:
-	case AIC32X4_FREQ_25000000:
+	case 12000000:
+	case 24000000:
+	case 25000000:
 		aic32x4->sysclk = freq;
 		return 0;
 	}
@@ -614,16 +614,9 @@ static int aic32x4_set_dai_sysclk(struct snd_soc_dai *codec_dai,
 static int aic32x4_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
 {
 	struct snd_soc_codec *codec = codec_dai->codec;
-	u8 iface_reg_1;
-	u8 iface_reg_2;
-	u8 iface_reg_3;
-
-	iface_reg_1 = snd_soc_read(codec, AIC32X4_IFACE1);
-	iface_reg_1 = iface_reg_1 & ~(3 << 6 | 3 << 2);
-	iface_reg_2 = snd_soc_read(codec, AIC32X4_IFACE2);
-	iface_reg_2 = 0;
-	iface_reg_3 = snd_soc_read(codec, AIC32X4_IFACE3);
-	iface_reg_3 = iface_reg_3 & ~(1 << 3);
+	u8 iface_reg_1 = 0;
+	u8 iface_reg_2 = 0;
+	u8 iface_reg_3 = 0;
 
 	/* set master/slave audio interface */
 	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
@@ -641,30 +634,37 @@ static int aic32x4_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
 	case SND_SOC_DAIFMT_I2S:
 		break;
 	case SND_SOC_DAIFMT_DSP_A:
-		iface_reg_1 |= (AIC32X4_DSP_MODE << AIC32X4_PLLJ_SHIFT);
-		iface_reg_3 |= (1 << 3); /* invert bit clock */
+		iface_reg_1 |= (AIC32X4_DSP_MODE <<
+				AIC32X4_IFACE1_DATATYPE_SHIFT);
+		iface_reg_3 |= AIC32X4_BCLKINV_MASK; /* invert bit clock */
 		iface_reg_2 = 0x01; /* add offset 1 */
 		break;
 	case SND_SOC_DAIFMT_DSP_B:
-		iface_reg_1 |= (AIC32X4_DSP_MODE << AIC32X4_PLLJ_SHIFT);
-		iface_reg_3 |= (1 << 3); /* invert bit clock */
+		iface_reg_1 |= (AIC32X4_DSP_MODE <<
+				AIC32X4_IFACE1_DATATYPE_SHIFT);
+		iface_reg_3 |= AIC32X4_BCLKINV_MASK; /* invert bit clock */
 		break;
 	case SND_SOC_DAIFMT_RIGHT_J:
-		iface_reg_1 |=
-			(AIC32X4_RIGHT_JUSTIFIED_MODE << AIC32X4_PLLJ_SHIFT);
+		iface_reg_1 |= (AIC32X4_RIGHT_JUSTIFIED_MODE <<
+				AIC32X4_IFACE1_DATATYPE_SHIFT);
 		break;
 	case SND_SOC_DAIFMT_LEFT_J:
-		iface_reg_1 |=
-			(AIC32X4_LEFT_JUSTIFIED_MODE << AIC32X4_PLLJ_SHIFT);
+		iface_reg_1 |= (AIC32X4_LEFT_JUSTIFIED_MODE <<
+				AIC32X4_IFACE1_DATATYPE_SHIFT);
 		break;
 	default:
 		printk(KERN_ERR "aic32x4: invalid DAI interface format\n");
 		return -EINVAL;
 	}
 
-	snd_soc_write(codec, AIC32X4_IFACE1, iface_reg_1);
-	snd_soc_write(codec, AIC32X4_IFACE2, iface_reg_2);
-	snd_soc_write(codec, AIC32X4_IFACE3, iface_reg_3);
+	snd_soc_update_bits(codec, AIC32X4_IFACE1,
+			    AIC32X4_IFACE1_DATATYPE_MASK |
+			    AIC32X4_IFACE1_MASTER_MASK, iface_reg_1);
+	snd_soc_update_bits(codec, AIC32X4_IFACE2,
+			    AIC32X4_DATA_OFFSET_MASK, iface_reg_2);
+	snd_soc_update_bits(codec, AIC32X4_IFACE3,
+			    AIC32X4_BCLKINV_MASK, iface_reg_3);
+
 	return 0;
 }
 
@@ -674,7 +674,8 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
 {
 	struct snd_soc_codec *codec = dai->codec;
 	struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
-	u8 data;
+	u8 iface1_reg = 0;
+	u8 dacsetup_reg = 0;
 	int i;
 
 	i = aic32x4_get_divs(aic32x4->sysclk, params_rate(params));
@@ -683,82 +684,88 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
 		return i;
 	}
 
-	/* Use PLL as CODEC_CLKIN and DAC_MOD_CLK as BDIV_CLKIN */
-	snd_soc_write(codec, AIC32X4_CLKMUX, AIC32X4_PLLCLKIN);
-	snd_soc_write(codec, AIC32X4_IFACE3, AIC32X4_DACMOD2BCLK);
+	/* MCLK as PLL_CLKIN */
+	snd_soc_update_bits(codec, AIC32X4_CLKMUX, AIC32X4_PLL_CLKIN_MASK,
+			    AIC32X4_PLL_CLKIN_MCLK << AIC32X4_PLL_CLKIN_SHIFT);
+	/* PLL as CODEC_CLKIN */
+	snd_soc_update_bits(codec, AIC32X4_CLKMUX, AIC32X4_CODEC_CLKIN_MASK,
+			    AIC32X4_CODEC_CLKIN_PLL << AIC32X4_CODEC_CLKIN_SHIFT);
+	/* DAC_MOD_CLK as BDIV_CLKIN */
+	snd_soc_update_bits(codec, AIC32X4_IFACE3, AIC32X4_BDIVCLK_MASK,
+			    AIC32X4_DACMOD2BCLK << AIC32X4_BDIVCLK_SHIFT);
+
+	/* We will fix R value to 1 and will make P & J=K.D as variable */
+	snd_soc_update_bits(codec, AIC32X4_PLLPR, AIC32X4_PLL_R_MASK, 0x01);
 
-	/* We will fix R value to 1 and will make P & J=K.D as varialble */
-	data = snd_soc_read(codec, AIC32X4_PLLPR);
-	data &= ~(7 << 4);
-	snd_soc_write(codec, AIC32X4_PLLPR,
-		      (data | (aic32x4_divs[i].p_val << 4) | 0x01));
+	/* PLL P value */
+	snd_soc_update_bits(codec, AIC32X4_PLLPR, AIC32X4_PLL_P_MASK,
+			    aic32x4_divs[i].p_val << AIC32X4_PLL_P_SHIFT);
 
+	/* PLL J value */
 	snd_soc_write(codec, AIC32X4_PLLJ, aic32x4_divs[i].pll_j);
 
+	/* PLL D value */
 	snd_soc_write(codec, AIC32X4_PLLDMSB, (aic32x4_divs[i].pll_d >> 8));
-	snd_soc_write(codec, AIC32X4_PLLDLSB,
-		      (aic32x4_divs[i].pll_d & 0xff));
+	snd_soc_write(codec, AIC32X4_PLLDLSB, (aic32x4_divs[i].pll_d & 0xff));
 
 	/* NDAC divider value */
-	data = snd_soc_read(codec, AIC32X4_NDAC);
-	data &= ~(0x7f);
-	snd_soc_write(codec, AIC32X4_NDAC, data | aic32x4_divs[i].ndac);
+	snd_soc_update_bits(codec, AIC32X4_NDAC,
+			    AIC32X4_NDAC_MASK, aic32x4_divs[i].ndac);
 
 	/* MDAC divider value */
-	data = snd_soc_read(codec, AIC32X4_MDAC);
-	data &= ~(0x7f);
-	snd_soc_write(codec, AIC32X4_MDAC, data | aic32x4_divs[i].mdac);
+	snd_soc_update_bits(codec, AIC32X4_MDAC,
+			    AIC32X4_MDAC_MASK, aic32x4_divs[i].mdac);
 
 	/* DOSR MSB & LSB values */
 	snd_soc_write(codec, AIC32X4_DOSRMSB, aic32x4_divs[i].dosr >> 8);
-	snd_soc_write(codec, AIC32X4_DOSRLSB,
-		      (aic32x4_divs[i].dosr & 0xff));
+	snd_soc_write(codec, AIC32X4_DOSRLSB, (aic32x4_divs[i].dosr & 0xff));
 
 	/* NADC divider value */
-	data = snd_soc_read(codec, AIC32X4_NADC);
-	data &= ~(0x7f);
-	snd_soc_write(codec, AIC32X4_NADC, data | aic32x4_divs[i].nadc);
+	snd_soc_update_bits(codec, AIC32X4_NADC,
+			    AIC32X4_NADC_MASK, aic32x4_divs[i].nadc);
 
 	/* MADC divider value */
-	data = snd_soc_read(codec, AIC32X4_MADC);
-	data &= ~(0x7f);
-	snd_soc_write(codec, AIC32X4_MADC, data | aic32x4_divs[i].madc);
+	snd_soc_update_bits(codec, AIC32X4_MADC,
+			    AIC32X4_MADC_MASK, aic32x4_divs[i].madc);
 
 	/* AOSR value */
 	snd_soc_write(codec, AIC32X4_AOSR, aic32x4_divs[i].aosr);
 
 	/* BCLK N divider */
-	data = snd_soc_read(codec, AIC32X4_BCLKN);
-	data &= ~(0x7f);
-	snd_soc_write(codec, AIC32X4_BCLKN, data | aic32x4_divs[i].blck_N);
+	snd_soc_update_bits(codec, AIC32X4_BCLKN,
+			    AIC32X4_BCLK_MASK, aic32x4_divs[i].blck_N);
 
-	data = snd_soc_read(codec, AIC32X4_IFACE1);
-	data = data & ~(3 << 4);
 	switch (params_width(params)) {
 	case 16:
+		iface1_reg |= (AIC32X4_WORD_LEN_16BITS <<
+			       AIC32X4_IFACE1_DATALEN_SHIFT);
 		break;
 	case 20:
-		data |= (AIC32X4_WORD_LEN_20BITS << AIC32X4_DOSRMSB_SHIFT);
+		iface1_reg |= (AIC32X4_WORD_LEN_20BITS <<
+			       AIC32X4_IFACE1_DATALEN_SHIFT);
 		break;
 	case 24:
-		data |= (AIC32X4_WORD_LEN_24BITS << AIC32X4_DOSRMSB_SHIFT);
+		iface1_reg |= (AIC32X4_WORD_LEN_24BITS <<
+			       AIC32X4_IFACE1_DATALEN_SHIFT);
 		break;
 	case 32:
-		data |= (AIC32X4_WORD_LEN_32BITS << AIC32X4_DOSRMSB_SHIFT);
+		iface1_reg |= (AIC32X4_WORD_LEN_32BITS <<
+			       AIC32X4_IFACE1_DATALEN_SHIFT);
 		break;
 	}
-	snd_soc_write(codec, AIC32X4_IFACE1, data);
+	snd_soc_update_bits(codec, AIC32X4_IFACE1,
+			    AIC32X4_IFACE1_DATALEN_MASK, iface1_reg);
 
 	if (params_channels(params) == 1) {
-		data = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2LCHN;
+		dacsetup_reg = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2LCHN;
 	} else {
 		if (aic32x4->swapdacs)
-			data = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2RCHN;
+			dacsetup_reg = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2RCHN;
 		else
-			data = AIC32X4_LDAC2LCHN | AIC32X4_RDAC2RCHN;
+			dacsetup_reg = AIC32X4_LDAC2LCHN | AIC32X4_RDAC2RCHN;
 	}
-	snd_soc_update_bits(codec, AIC32X4_DACSETUP, AIC32X4_DAC_CHAN_MASK,
-			data);
+	snd_soc_update_bits(codec, AIC32X4_DACSETUP,
+			    AIC32X4_DAC_CHAN_MASK, dacsetup_reg);
 
 	return 0;
 }
@@ -766,13 +773,10 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
 static int aic32x4_mute(struct snd_soc_dai *dai, int mute)
 {
 	struct snd_soc_codec *codec = dai->codec;
-	u8 dac_reg;
 
-	dac_reg = snd_soc_read(codec, AIC32X4_DACMUTE) & ~AIC32X4_MUTEON;
-	if (mute)
-		snd_soc_write(codec, AIC32X4_DACMUTE, dac_reg | AIC32X4_MUTEON);
-	else
-		snd_soc_write(codec, AIC32X4_DACMUTE, dac_reg);
+	snd_soc_update_bits(codec, AIC32X4_DACMUTE,
+			    AIC32X4_MUTEON, mute ? AIC32X4_MUTEON : 0);
+
 	return 0;
 }
 
diff --git a/sound/soc/codecs/tlv320aic32x4.h b/sound/soc/codecs/tlv320aic32x4.h
index da7cec482bcbf3cc6f9db4f714342335c1cda60e..e9df49edbf19dae7145dba6c24aaef5672b6263c 100644
--- a/sound/soc/codecs/tlv320aic32x4.h
+++ b/sound/soc/codecs/tlv320aic32x4.h
@@ -19,141 +19,189 @@ int aic32x4_remove(struct device *dev);
 
 /* tlv320aic32x4 register space (in decimal to match datasheet) */
 
-#define AIC32X4_PAGE1		128
-
-#define	AIC32X4_PSEL		0
-#define	AIC32X4_RESET		1
-#define	AIC32X4_CLKMUX		4
-#define	AIC32X4_PLLPR		5
-#define	AIC32X4_PLLJ		6
-#define	AIC32X4_PLLDMSB		7
-#define	AIC32X4_PLLDLSB		8
-#define	AIC32X4_NDAC		11
-#define	AIC32X4_MDAC		12
-#define AIC32X4_DOSRMSB		13
-#define AIC32X4_DOSRLSB		14
-#define	AIC32X4_NADC		18
-#define	AIC32X4_MADC		19
-#define AIC32X4_AOSR		20
-#define AIC32X4_CLKMUX2		25
-#define AIC32X4_CLKOUTM		26
-#define AIC32X4_IFACE1		27
-#define AIC32X4_IFACE2		28
-#define AIC32X4_IFACE3		29
-#define AIC32X4_BCLKN		30
-#define AIC32X4_IFACE4		31
-#define AIC32X4_IFACE5		32
-#define AIC32X4_IFACE6		33
-#define AIC32X4_GPIOCTL		52
-#define AIC32X4_DOUTCTL		53
-#define AIC32X4_DINCTL		54
-#define AIC32X4_MISOCTL		55
-#define AIC32X4_SCLKCTL		56
-#define AIC32X4_DACSPB		60
-#define AIC32X4_ADCSPB		61
-#define AIC32X4_DACSETUP	63
-#define AIC32X4_DACMUTE		64
-#define AIC32X4_LDACVOL		65
-#define AIC32X4_RDACVOL		66
-#define AIC32X4_ADCSETUP	81
-#define	AIC32X4_ADCFGA		82
-#define AIC32X4_LADCVOL		83
-#define AIC32X4_RADCVOL		84
-#define AIC32X4_LAGC1		86
-#define AIC32X4_LAGC2		87
-#define AIC32X4_LAGC3		88
-#define AIC32X4_LAGC4		89
-#define AIC32X4_LAGC5		90
-#define AIC32X4_LAGC6		91
-#define AIC32X4_LAGC7		92
-#define AIC32X4_RAGC1		94
-#define AIC32X4_RAGC2		95
-#define AIC32X4_RAGC3		96
-#define AIC32X4_RAGC4		97
-#define AIC32X4_RAGC5		98
-#define AIC32X4_RAGC6		99
-#define AIC32X4_RAGC7		100
-#define AIC32X4_PWRCFG		(AIC32X4_PAGE1 + 1)
-#define AIC32X4_LDOCTL		(AIC32X4_PAGE1 + 2)
-#define AIC32X4_OUTPWRCTL	(AIC32X4_PAGE1 + 9)
-#define AIC32X4_CMMODE		(AIC32X4_PAGE1 + 10)
-#define AIC32X4_HPLROUTE	(AIC32X4_PAGE1 + 12)
-#define AIC32X4_HPRROUTE	(AIC32X4_PAGE1 + 13)
-#define AIC32X4_LOLROUTE	(AIC32X4_PAGE1 + 14)
-#define AIC32X4_LORROUTE	(AIC32X4_PAGE1 + 15)
-#define	AIC32X4_HPLGAIN		(AIC32X4_PAGE1 + 16)
-#define	AIC32X4_HPRGAIN		(AIC32X4_PAGE1 + 17)
-#define	AIC32X4_LOLGAIN		(AIC32X4_PAGE1 + 18)
-#define	AIC32X4_LORGAIN		(AIC32X4_PAGE1 + 19)
-#define AIC32X4_HEADSTART	(AIC32X4_PAGE1 + 20)
-#define AIC32X4_MICBIAS		(AIC32X4_PAGE1 + 51)
-#define AIC32X4_LMICPGAPIN	(AIC32X4_PAGE1 + 52)
-#define AIC32X4_LMICPGANIN	(AIC32X4_PAGE1 + 54)
-#define AIC32X4_RMICPGAPIN	(AIC32X4_PAGE1 + 55)
-#define AIC32X4_RMICPGANIN	(AIC32X4_PAGE1 + 57)
-#define AIC32X4_FLOATINGINPUT	(AIC32X4_PAGE1 + 58)
-#define AIC32X4_LMICPGAVOL	(AIC32X4_PAGE1 + 59)
-#define AIC32X4_RMICPGAVOL	(AIC32X4_PAGE1 + 60)
-
-#define AIC32X4_FREQ_12000000 12000000
-#define AIC32X4_FREQ_24000000 24000000
-#define AIC32X4_FREQ_25000000 25000000
-
-#define AIC32X4_WORD_LEN_16BITS		0x00
-#define AIC32X4_WORD_LEN_20BITS		0x01
-#define AIC32X4_WORD_LEN_24BITS		0x02
-#define AIC32X4_WORD_LEN_32BITS		0x03
-
-#define AIC32X4_LADC_EN			(1 << 7)
-#define AIC32X4_RADC_EN			(1 << 6)
-
-#define AIC32X4_I2S_MODE		0x00
-#define AIC32X4_DSP_MODE		0x01
-#define AIC32X4_RIGHT_JUSTIFIED_MODE	0x02
-#define AIC32X4_LEFT_JUSTIFIED_MODE	0x03
-
-#define AIC32X4_AVDDWEAKDISABLE		0x08
-#define AIC32X4_LDOCTLEN		0x01
-
-#define AIC32X4_LDOIN_18_36		0x01
-#define AIC32X4_LDOIN2HP		0x02
-
-#define AIC32X4_DACSPBLOCK_MASK		0x1f
-#define AIC32X4_ADCSPBLOCK_MASK		0x1f
-
-#define AIC32X4_PLLJ_SHIFT		6
-#define AIC32X4_DOSRMSB_SHIFT		4
-
-#define AIC32X4_PLLCLKIN		0x03
-
-#define AIC32X4_MICBIAS_LDOIN		0x08
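+/* Registers are addressed linearly: page N, offset R maps to N * 128 + R */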
+#define AIC32X4_REG(page, reg)	((page * 128) + reg)
+
+#define	AIC32X4_PSEL		AIC32X4_REG(0, 0)
+
+#define	AIC32X4_RESET		AIC32X4_REG(0, 1)
+#define	AIC32X4_CLKMUX		AIC32X4_REG(0, 4)
+#define	AIC32X4_PLLPR		AIC32X4_REG(0, 5)
+#define	AIC32X4_PLLJ		AIC32X4_REG(0, 6)
+#define	AIC32X4_PLLDMSB		AIC32X4_REG(0, 7)
+#define	AIC32X4_PLLDLSB		AIC32X4_REG(0, 8)
+#define	AIC32X4_NDAC		AIC32X4_REG(0, 11)
+#define	AIC32X4_MDAC		AIC32X4_REG(0, 12)
+#define AIC32X4_DOSRMSB		AIC32X4_REG(0, 13)
+#define AIC32X4_DOSRLSB		AIC32X4_REG(0, 14)
+#define	AIC32X4_NADC		AIC32X4_REG(0, 18)
+#define	AIC32X4_MADC		AIC32X4_REG(0, 19)
+#define AIC32X4_AOSR		AIC32X4_REG(0, 20)
+#define AIC32X4_CLKMUX2		AIC32X4_REG(0, 25)
+#define AIC32X4_CLKOUTM		AIC32X4_REG(0, 26)
+#define AIC32X4_IFACE1		AIC32X4_REG(0, 27)
+#define AIC32X4_IFACE2		AIC32X4_REG(0, 28)
+#define AIC32X4_IFACE3		AIC32X4_REG(0, 29)
+#define AIC32X4_BCLKN		AIC32X4_REG(0, 30)
+#define AIC32X4_IFACE4		AIC32X4_REG(0, 31)
+#define AIC32X4_IFACE5		AIC32X4_REG(0, 32)
+#define AIC32X4_IFACE6		AIC32X4_REG(0, 33)
+#define AIC32X4_GPIOCTL		AIC32X4_REG(0, 52)
+#define AIC32X4_DOUTCTL		AIC32X4_REG(0, 53)
+#define AIC32X4_DINCTL		AIC32X4_REG(0, 54)
+#define AIC32X4_MISOCTL		AIC32X4_REG(0, 55)
+#define AIC32X4_SCLKCTL		AIC32X4_REG(0, 56)
+#define AIC32X4_DACSPB		AIC32X4_REG(0, 60)
+#define AIC32X4_ADCSPB		AIC32X4_REG(0, 61)
+#define AIC32X4_DACSETUP	AIC32X4_REG(0, 63)
+#define AIC32X4_DACMUTE		AIC32X4_REG(0, 64)
+#define AIC32X4_LDACVOL		AIC32X4_REG(0, 65)
+#define AIC32X4_RDACVOL		AIC32X4_REG(0, 66)
+#define AIC32X4_ADCSETUP	AIC32X4_REG(0, 81)
+#define	AIC32X4_ADCFGA		AIC32X4_REG(0, 82)
+#define AIC32X4_LADCVOL		AIC32X4_REG(0, 83)
+#define AIC32X4_RADCVOL		AIC32X4_REG(0, 84)
+#define AIC32X4_LAGC1		AIC32X4_REG(0, 86)
+#define AIC32X4_LAGC2		AIC32X4_REG(0, 87)
+#define AIC32X4_LAGC3		AIC32X4_REG(0, 88)
+#define AIC32X4_LAGC4		AIC32X4_REG(0, 89)
+#define AIC32X4_LAGC5		AIC32X4_REG(0, 90)
+#define AIC32X4_LAGC6		AIC32X4_REG(0, 91)
+#define AIC32X4_LAGC7		AIC32X4_REG(0, 92)
+#define AIC32X4_RAGC1		AIC32X4_REG(0, 94)
+#define AIC32X4_RAGC2		AIC32X4_REG(0, 95)
+#define AIC32X4_RAGC3		AIC32X4_REG(0, 96)
+#define AIC32X4_RAGC4		AIC32X4_REG(0, 97)
+#define AIC32X4_RAGC5		AIC32X4_REG(0, 98)
+#define AIC32X4_RAGC6		AIC32X4_REG(0, 99)
+#define AIC32X4_RAGC7		AIC32X4_REG(0, 100)
+
+#define AIC32X4_PWRCFG		AIC32X4_REG(1, 1)
+#define AIC32X4_LDOCTL		AIC32X4_REG(1, 2)
+#define AIC32X4_OUTPWRCTL	AIC32X4_REG(1, 9)
+#define AIC32X4_CMMODE		AIC32X4_REG(1, 10)
+#define AIC32X4_HPLROUTE	AIC32X4_REG(1, 12)
+#define AIC32X4_HPRROUTE	AIC32X4_REG(1, 13)
+#define AIC32X4_LOLROUTE	AIC32X4_REG(1, 14)
+#define AIC32X4_LORROUTE	AIC32X4_REG(1, 15)
+#define	AIC32X4_HPLGAIN		AIC32X4_REG(1, 16)
+#define	AIC32X4_HPRGAIN		AIC32X4_REG(1, 17)
+#define	AIC32X4_LOLGAIN		AIC32X4_REG(1, 18)
+#define	AIC32X4_LORGAIN		AIC32X4_REG(1, 19)
+#define AIC32X4_HEADSTART	AIC32X4_REG(1, 20)
+#define AIC32X4_MICBIAS		AIC32X4_REG(1, 51)
+#define AIC32X4_LMICPGAPIN	AIC32X4_REG(1, 52)
+#define AIC32X4_LMICPGANIN	AIC32X4_REG(1, 54)
+#define AIC32X4_RMICPGAPIN	AIC32X4_REG(1, 55)
+#define AIC32X4_RMICPGANIN	AIC32X4_REG(1, 57)
+#define AIC32X4_FLOATINGINPUT	AIC32X4_REG(1, 58)
+#define AIC32X4_LMICPGAVOL	AIC32X4_REG(1, 59)
+#define AIC32X4_RMICPGAVOL	AIC32X4_REG(1, 60)
+
+/* Bits, masks, and shifts */
+
+/* AIC32X4_CLKMUX */
+#define AIC32X4_PLL_CLKIN_MASK		GENMASK(3, 2)
+#define AIC32X4_PLL_CLKIN_SHIFT		(2)
+#define AIC32X4_PLL_CLKIN_MCLK		(0x00)
+#define AIC32X4_PLL_CLKIN_BCKL		(0x01)
+#define AIC32X4_PLL_CLKIN_GPIO1		(0x02)
+#define AIC32X4_PLL_CLKIN_DIN		(0x03)
+#define AIC32X4_CODEC_CLKIN_MASK	GENMASK(1, 0)
+#define AIC32X4_CODEC_CLKIN_SHIFT	(0)
+#define AIC32X4_CODEC_CLKIN_MCLK	(0x00)
+#define AIC32X4_CODEC_CLKIN_BCLK	(0x01)
+#define AIC32X4_CODEC_CLKIN_GPIO1	(0x02)
+#define AIC32X4_CODEC_CLKIN_PLL		(0x03)
+
+/* AIC32X4_PLLPR */
+#define AIC32X4_PLLEN			BIT(7)
+#define AIC32X4_PLL_P_MASK		GENMASK(6, 4)
+#define AIC32X4_PLL_P_SHIFT		(4)
+#define AIC32X4_PLL_R_MASK		GENMASK(3, 0)
+
+/* AIC32X4_NDAC */
+#define AIC32X4_NDACEN			BIT(7)
+#define AIC32X4_NDAC_MASK		GENMASK(6, 0)
+
+/* AIC32X4_MDAC */
+#define AIC32X4_MDACEN			BIT(7)
+#define AIC32X4_MDAC_MASK		GENMASK(6, 0)
+
+/* AIC32X4_NADC */
+#define AIC32X4_NADCEN			BIT(7)
+#define AIC32X4_NADC_MASK		GENMASK(6, 0)
+
+/* AIC32X4_MADC */
+#define AIC32X4_MADCEN			BIT(7)
+#define AIC32X4_MADC_MASK		GENMASK(6, 0)
+
+/* AIC32X4_BCLKN */
+#define AIC32X4_BCLKEN			BIT(7)
+#define AIC32X4_BCLK_MASK		GENMASK(6, 0)
+
+/* AIC32X4_IFACE1 */
+#define AIC32X4_IFACE1_DATATYPE_MASK	GENMASK(7, 6)
+#define AIC32X4_IFACE1_DATATYPE_SHIFT	(6)
+#define AIC32X4_I2S_MODE		(0x00)
+#define AIC32X4_DSP_MODE		(0x01)
+#define AIC32X4_RIGHT_JUSTIFIED_MODE	(0x02)
+#define AIC32X4_LEFT_JUSTIFIED_MODE	(0x03)
+#define AIC32X4_IFACE1_DATALEN_MASK	GENMASK(5, 4)
+#define AIC32X4_IFACE1_DATALEN_SHIFT	(4)
+#define AIC32X4_WORD_LEN_16BITS		(0x00)
+#define AIC32X4_WORD_LEN_20BITS		(0x01)
+#define AIC32X4_WORD_LEN_24BITS		(0x02)
+#define AIC32X4_WORD_LEN_32BITS		(0x03)
+#define AIC32X4_IFACE1_MASTER_MASK	GENMASK(3, 2)
+#define AIC32X4_BCLKMASTER		BIT(2)
+#define AIC32X4_WCLKMASTER		BIT(3)
+
+/* AIC32X4_IFACE2 */
+#define AIC32X4_DATA_OFFSET_MASK	GENMASK(7, 0)
+
+/* AIC32X4_IFACE3 */
+#define AIC32X4_BCLKINV_MASK		BIT(3)
+#define AIC32X4_BDIVCLK_MASK		GENMASK(1, 0)
+#define AIC32X4_BDIVCLK_SHIFT		(0)
+#define AIC32X4_DAC2BCLK		(0x00)
+#define AIC32X4_DACMOD2BCLK		(0x01)
+#define AIC32X4_ADC2BCLK		(0x02)
+#define AIC32X4_ADCMOD2BCLK		(0x03)
+
+/* AIC32X4_DACSETUP */
+#define AIC32X4_DAC_CHAN_MASK		GENMASK(5, 2)
+#define AIC32X4_LDAC2RCHN		BIT(5)
+#define AIC32X4_LDAC2LCHN		BIT(4)
+#define AIC32X4_RDAC2LCHN		BIT(3)
+#define AIC32X4_RDAC2RCHN		BIT(2)
+
+/* AIC32X4_DACMUTE */
+#define AIC32X4_MUTEON			0x0C
+
+/* AIC32X4_ADCSETUP */
+#define AIC32X4_LADC_EN			BIT(7)
+#define AIC32X4_RADC_EN			BIT(6)
+
+/* AIC32X4_PWRCFG */
+#define AIC32X4_AVDDWEAKDISABLE		BIT(3)
+
+/* AIC32X4_LDOCTL */
+#define AIC32X4_LDOCTLEN		BIT(0)
+
+/* AIC32X4_CMMODE */
+#define AIC32X4_LDOIN_18_36		BIT(0)
+#define AIC32X4_LDOIN2HP		BIT(1)
+
+/* AIC32X4_MICBIAS */
+#define AIC32X4_MICBIAS_LDOIN		BIT(3)
 #define AIC32X4_MICBIAS_2075V		0x60
 
+/* AIC32X4_LMICPGANIN */
 #define AIC32X4_LMICPGANIN_IN2R_10K	0x10
 #define AIC32X4_LMICPGANIN_CM1L_10K	0x40
+
+/* AIC32X4_RMICPGANIN */
 #define AIC32X4_RMICPGANIN_IN1L_10K	0x10
 #define AIC32X4_RMICPGANIN_CM1R_10K	0x40
 
-#define AIC32X4_LMICPGAVOL_NOGAIN	0x80
-#define AIC32X4_RMICPGAVOL_NOGAIN	0x80
-
-#define AIC32X4_BCLKMASTER		0x08
-#define AIC32X4_WCLKMASTER		0x04
-#define AIC32X4_PLLEN			(0x01 << 7)
-#define AIC32X4_NDACEN			(0x01 << 7)
-#define AIC32X4_MDACEN			(0x01 << 7)
-#define AIC32X4_NADCEN			(0x01 << 7)
-#define AIC32X4_MADCEN			(0x01 << 7)
-#define AIC32X4_BCLKEN			(0x01 << 7)
-#define AIC32X4_DACEN			(0x03 << 6)
-#define AIC32X4_RDAC2LCHN		(0x02 << 2)
-#define AIC32X4_LDAC2RCHN		(0x02 << 4)
-#define AIC32X4_LDAC2LCHN		(0x01 << 4)
-#define AIC32X4_RDAC2RCHN		(0x01 << 2)
-#define AIC32X4_DAC_CHAN_MASK		0x3c
-
-#define AIC32X4_SSTEP2WCLK		0x01
-#define AIC32X4_MUTEON			0x0C
-#define	AIC32X4_DACMOD2BCLK		0x01
-
 #endif				/* _TLV320AIC32X4_H */
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 06f92571eba4622a89ae808911fd0b510ec2eed2..b751cad545da40d5e605ec4da2dd659b7cf19194 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1804,11 +1804,18 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
 		if (!ai3x_setup)
 			return -ENOMEM;
 
-		ret = of_get_named_gpio(np, "gpio-reset", 0);
-		if (ret >= 0)
+		ret = of_get_named_gpio(np, "reset-gpios", 0);
+		if (ret >= 0) {
 			aic3x->gpio_reset = ret;
-		else
-			aic3x->gpio_reset = -1;
+		} else {
+			ret = of_get_named_gpio(np, "gpio-reset", 0);
+			if (ret > 0) {
+				dev_warn(&i2c->dev, "Using deprecated property \"gpio-reset\", please update your DT\n");
+				aic3x->gpio_reset = ret;
+			} else {
+				aic3x->gpio_reset = -1;
+			}
+		}
 
 		if (of_property_read_u32_array(np, "ai3x-gpio-func",
 					ai3x_setup->gpio_func, 2) >= 0) {
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index 5b94a151539ce1f828e4af7feb643be9d62eb7fd..675f5b1b90a613c20778db5c4ff0ebdbc2ab3932 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -106,6 +106,7 @@ struct tlv320dac33_priv {
 	int mode1_latency;		/* latency caused by the i2c writes in
 					 * us */
 	u8 burst_bclkdiv;		/* BCLK divider value in burst mode */
+	u8 *reg_cache;
 	unsigned int burst_rate;	/* Interface speed in Burst modes */
 
 	int keep_bclk;			/* Keep the BCLK continuously running
@@ -121,7 +122,7 @@ struct tlv320dac33_priv {
 	unsigned int uthr;
 
 	enum dac33_state state;
-	void *control_data;
+	struct i2c_client *i2c;
 };
 
 static const u8 dac33_reg[DAC33_CACHEREGNUM] = {
@@ -173,7 +174,8 @@ static const u8 dac33_reg[DAC33_CACHEREGNUM] = {
 static inline unsigned int dac33_read_reg_cache(struct snd_soc_codec *codec,
 						unsigned reg)
 {
-	u8 *cache = codec->reg_cache;
+	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
+	u8 *cache = dac33->reg_cache;
 	if (reg >= DAC33_CACHEREGNUM)
 		return 0;
 
@@ -183,7 +185,8 @@ static inline unsigned int dac33_read_reg_cache(struct snd_soc_codec *codec,
 static inline void dac33_write_reg_cache(struct snd_soc_codec *codec,
 					 u8 reg, u8 value)
 {
-	u8 *cache = codec->reg_cache;
+	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
+	u8 *cache = dac33->reg_cache;
 	if (reg >= DAC33_CACHEREGNUM)
 		return;
 
@@ -200,7 +203,7 @@ static int dac33_read(struct snd_soc_codec *codec, unsigned int reg,
 
 	/* If powered off, return the cached value */
 	if (dac33->chip_power) {
-		val = i2c_smbus_read_byte_data(codec->control_data, value[0]);
+		val = i2c_smbus_read_byte_data(dac33->i2c, value[0]);
 		if (val < 0) {
 			dev_err(codec->dev, "Read failed (%d)\n", val);
 			value[0] = dac33_read_reg_cache(codec, reg);
@@ -233,7 +236,7 @@ static int dac33_write(struct snd_soc_codec *codec, unsigned int reg,
 
 	dac33_write_reg_cache(codec, data[0], data[1]);
 	if (dac33->chip_power) {
-		ret = codec->hw_write(codec->control_data, data, 2);
+		ret = i2c_master_send(dac33->i2c, data, 2);
 		if (ret != 2)
 			dev_err(codec->dev, "Write failed (%d)\n", ret);
 		else
@@ -243,19 +246,6 @@ static int dac33_write(struct snd_soc_codec *codec, unsigned int reg,
 	return ret;
 }
 
-static int dac33_write_locked(struct snd_soc_codec *codec, unsigned int reg,
-		       unsigned int value)
-{
-	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
-	int ret;
-
-	mutex_lock(&dac33->mutex);
-	ret = dac33_write(codec, reg, value);
-	mutex_unlock(&dac33->mutex);
-
-	return ret;
-}
-
 #define DAC33_I2C_ADDR_AUTOINC	0x80
 static int dac33_write16(struct snd_soc_codec *codec, unsigned int reg,
 		       unsigned int value)
@@ -280,7 +270,7 @@ static int dac33_write16(struct snd_soc_codec *codec, unsigned int reg,
 	if (dac33->chip_power) {
 		/* We need to set autoincrement mode for 16 bit writes */
 		data[0] |= DAC33_I2C_ADDR_AUTOINC;
-		ret = codec->hw_write(codec->control_data, data, 3);
+		ret = i2c_master_send(dac33->i2c, data, 3);
 		if (ret != 3)
 			dev_err(codec->dev, "Write failed (%d)\n", ret);
 		else
@@ -1379,8 +1369,6 @@ static int dac33_soc_probe(struct snd_soc_codec *codec)
 	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
 	int ret = 0;
 
-	codec->control_data = dac33->control_data;
-	codec->hw_write = (hw_write_t) i2c_master_send;
 	dac33->codec = codec;
 
 	/* Read the tlv320dac33 ID registers */
@@ -1434,13 +1422,9 @@ static int dac33_soc_remove(struct snd_soc_codec *codec)
 }
 
 static const struct snd_soc_codec_driver soc_codec_dev_tlv320dac33 = {
-	.read = dac33_read_reg_cache,
-	.write = dac33_write_locked,
 	.set_bias_level = dac33_set_bias_level,
 	.idle_bias_off = true,
-	.reg_cache_size = ARRAY_SIZE(dac33_reg),
-	.reg_word_size = sizeof(u8),
-	.reg_cache_default = dac33_reg,
+
 	.probe = dac33_soc_probe,
 	.remove = dac33_soc_remove,
 
@@ -1499,7 +1483,14 @@ static int dac33_i2c_probe(struct i2c_client *client,
 	if (dac33 == NULL)
 		return -ENOMEM;
 
-	dac33->control_data = client;
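+	/* Writable copy of the register defaults used as a private cache */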
+	dac33->reg_cache = devm_kmemdup(&client->dev,
+					dac33_reg,
+					ARRAY_SIZE(dac33_reg) * sizeof(u8),
+					GFP_KERNEL);
+	if (!dac33->reg_cache)
+		return -ENOMEM;
+
+	dac33->i2c = client;
 	mutex_init(&dac33->mutex);
 	spin_lock_init(&dac33->lock);
 
diff --git a/sound/soc/codecs/ts3a227e.c b/sound/soc/codecs/ts3a227e.c
index 738e04b09116d0669d9a8d4aae3a82e276805d3e..1271e7e1fc78c9b461fe28aa5dcac31b7bd259a7 100644
--- a/sound/soc/codecs/ts3a227e.c
+++ b/sound/soc/codecs/ts3a227e.c
@@ -241,7 +241,7 @@ int ts3a227e_enable_jack_detect(struct snd_soc_component *component,
 {
 	struct ts3a227e *ts3a227e = snd_soc_component_get_drvdata(component);
 
-	snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
+	snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
 	snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
 	snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
 	snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
diff --git a/sound/soc/codecs/tscs42xx.c b/sound/soc/codecs/tscs42xx.c
new file mode 100644
index 0000000000000000000000000000000000000000..eedd600875e5c17d1d9d031d39d37c50d1c74a35
--- /dev/null
+++ b/sound/soc/codecs/tscs42xx.c
@@ -0,0 +1,1456 @@
+// SPDX-License-Identifier: GPL-2.0
+// tscs42xx.c -- TSCS42xx ALSA SoC Audio driver
+// Copyright 2017 Tempo Semiconductor, Inc.
+// Author: Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+
+#include "tscs42xx.h"
+
+#define COEFF_SIZE 3
+#define BIQUAD_COEFF_COUNT 5
+#define BIQUAD_SIZE (COEFF_SIZE * BIQUAD_COEFF_COUNT)
+
+#define COEFF_RAM_MAX_ADDR 0xcd
+#define COEFF_RAM_COEFF_COUNT (COEFF_RAM_MAX_ADDR + 1)
+#define COEFF_RAM_SIZE (COEFF_SIZE * COEFF_RAM_COEFF_COUNT)
+
+struct tscs42xx {
+
+	int bclk_ratio;
+	int samplerate;
+	unsigned int blrcm;
+	struct mutex audio_params_lock;
+
+	u8 coeff_ram[COEFF_RAM_SIZE];
+	bool coeff_ram_synced;
+	struct mutex coeff_ram_lock;
+
+	struct mutex pll_lock;
+
+	struct regmap *regmap;
+
+	struct device *dev;
+};
+
+struct coeff_ram_ctl {
+	unsigned int addr;
+	struct soc_bytes_ext bytes_ext;
+};
+
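+/*
+ * The DAC coefficient-RAM access registers and the PLL status register are
+ * marked volatile below so regmap never serves them from its cache; the
+ * coefficient read/write data windows are additionally marked precious so
+ * regmap never reads them as a side effect (e.g. in the debugfs dump).
+ */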
+static bool tscs42xx_volatile(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case R_DACCRWRL:
+	case R_DACCRWRM:
+	case R_DACCRWRH:
+	case R_DACCRRDL:
+	case R_DACCRRDM:
+	case R_DACCRRDH:
+	case R_DACCRSTAT:
+	case R_DACCRADDR:
+	case R_PLLCTL0:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool tscs42xx_precious(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case R_DACCRWRL:
+	case R_DACCRWRM:
+	case R_DACCRWRH:
+	case R_DACCRRDL:
+	case R_DACCRRDM:
+	case R_DACCRRDH:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static const struct regmap_config tscs42xx_regmap = {
+	.reg_bits = 8,
+	.val_bits = 8,
+
+	.volatile_reg = tscs42xx_volatile,
+	.precious_reg = tscs42xx_precious,
+	.max_register = R_DACMBCREL3H,
+
+	.cache_type = REGCACHE_RBTREE,
+	.can_multi_write = true,
+};
+
+#define MAX_PLL_LOCK_20MS_WAITS 1
+static bool plls_locked(struct snd_soc_codec *codec)
+{
+	int ret;
+	int count = MAX_PLL_LOCK_20MS_WAITS;
+
+	do {
+		ret = snd_soc_read(codec, R_PLLCTL0);
+		if (ret < 0) {
+			dev_err(codec->dev,
+				"Failed to read PLL lock status (%d)\n", ret);
+			return false;
+		} else if (ret > 0) {
+			return true;
+		}
+		msleep(20);
+	} while (count--);
+
+	return false;
+}
+
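+/*
+ * Both PLL targets are 2560 times the base rate of their family
+ * (44.1 kHz * 2560 = 112.896 MHz, 48 kHz * 2560 = 122.88 MHz), so every
+ * rate handled below divides its PLL output evenly.
+ */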
+static int sample_rate_to_pll_freq_out(int sample_rate)
+{
+	switch (sample_rate) {
+	case 11025:
+	case 22050:
+	case 44100:
+	case 88200:
+		return 112896000;
+	case 8000:
+	case 16000:
+	case 32000:
+	case 48000:
+	case 96000:
+		return 122880000;
+	default:
+		return -EINVAL;
+	}
+}
+
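+/*
+ * Coefficients are written through a small register window: poll
+ * R_DACCRSTAT until it reads zero (the previous transfer has presumably
+ * completed), write the coefficient address to R_DACCRADDR, then burst the
+ * three data bytes to R_DACCRWRL/M/H, which sit at consecutive addresses
+ * and can therefore take a single bulk write.
+ */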
+#define DACCRSTAT_MAX_TRYS 10
+static int write_coeff_ram(struct snd_soc_codec *codec, u8 *coeff_ram,
+	unsigned int addr, unsigned int coeff_cnt)
+{
+	struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+	int cnt;
+	int trys;
+	int ret;
+
+	for (cnt = 0; cnt < coeff_cnt; cnt++, addr++) {
+
+		for (trys = 0; trys < DACCRSTAT_MAX_TRYS; trys++) {
+			ret = snd_soc_read(codec, R_DACCRSTAT);
+			if (ret < 0) {
+				dev_err(codec->dev,
+					"Failed to read stat (%d)\n", ret);
+				return ret;
+			}
+			if (!ret)
+				break;
+		}
+
+		if (trys == DACCRSTAT_MAX_TRYS) {
+			ret = -EIO;
+			dev_err(codec->dev,
+				"dac coefficient write error (%d)\n", ret);
+			return ret;
+		}
+
+		ret = regmap_write(tscs42xx->regmap, R_DACCRADDR, addr);
+		if (ret < 0) {
+			dev_err(codec->dev,
+				"Failed to write dac ram address (%d)\n", ret);
+			return ret;
+		}
+
+		ret = regmap_bulk_write(tscs42xx->regmap, R_DACCRWRL,
+			&coeff_ram[addr * COEFF_SIZE],
+			COEFF_SIZE);
+		if (ret < 0) {
+			dev_err(codec->dev,
+				"Failed to write dac ram (%d)\n", ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int power_up_audio_plls(struct snd_soc_codec *codec)
+{
+	struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+	int freq_out;
+	int ret;
+	unsigned int mask;
+	unsigned int val;
+
+	freq_out = sample_rate_to_pll_freq_out(tscs42xx->samplerate);
+	switch (freq_out) {
+	case 122880000: /* 48k */
+		mask = RM_PLLCTL1C_PDB_PLL1;
+		val = RV_PLLCTL1C_PDB_PLL1_ENABLE;
+		break;
+	case 112896000: /* 44.1k */
+		mask = RM_PLLCTL1C_PDB_PLL2;
+		val = RV_PLLCTL1C_PDB_PLL2_ENABLE;
+		break;
+	default:
+		ret = -EINVAL;
+		dev_err(codec->dev, "Unrecognized PLL output freq (%d)\n", ret);
+		return ret;
+	}
+
+	mutex_lock(&tscs42xx->pll_lock);
+
+	ret = snd_soc_update_bits(codec, R_PLLCTL1C, mask, val);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to turn PLL on (%d)\n", ret);
+		goto exit;
+	}
+
+	if (!plls_locked(codec)) {
+		dev_err(codec->dev, "Failed to lock plls\n");
+		ret = -ENOMSG;
+		goto exit;
+	}
+
+	ret = 0;
+exit:
+	mutex_unlock(&tscs42xx->pll_lock);
+
+	return ret;
+}
+
+static int power_down_audio_plls(struct snd_soc_codec *codec)
+{
+	struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+	int ret;
+
+	mutex_lock(&tscs42xx->pll_lock);
+
+	ret = snd_soc_update_bits(codec, R_PLLCTL1C,
+			RM_PLLCTL1C_PDB_PLL1,
+			RV_PLLCTL1C_PDB_PLL1_DISABLE);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to turn PLL off (%d)\n", ret);
+		goto exit;
+	}
+	ret = snd_soc_update_bits(codec, R_PLLCTL1C,
+			RM_PLLCTL1C_PDB_PLL2,
+			RV_PLLCTL1C_PDB_PLL2_DISABLE);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to turn PLL off (%d)\n", ret);
+		goto exit;
+	}
+
+	ret = 0;
+exit:
+	mutex_unlock(&tscs42xx->pll_lock);
+
+	return ret;
+}
+
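+/*
+ * These handlers back the COEFF_RAM_CTL bytes controls defined further
+ * down: get returns the shadowed block, put updates the shadow and only
+ * flushes it to the hardware while the PLLs are locked; otherwise the RAM
+ * stays marked out of sync and dac_event() rewrites all of it on the next
+ * DAC power-up.
+ */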
+static int coeff_ram_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+	struct coeff_ram_ctl *ctl =
+		(struct coeff_ram_ctl *)kcontrol->private_value;
+	struct soc_bytes_ext *params = &ctl->bytes_ext;
+
+	mutex_lock(&tscs42xx->coeff_ram_lock);
+
+	memcpy(ucontrol->value.bytes.data,
+		&tscs42xx->coeff_ram[ctl->addr * COEFF_SIZE], params->max);
+
+	mutex_unlock(&tscs42xx->coeff_ram_lock);
+
+	return 0;
+}
+
+static int coeff_ram_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+	struct coeff_ram_ctl *ctl =
+		(struct coeff_ram_ctl *)kcontrol->private_value;
+	struct soc_bytes_ext *params = &ctl->bytes_ext;
+	unsigned int coeff_cnt = params->max / COEFF_SIZE;
+	int ret;
+
+	mutex_lock(&tscs42xx->coeff_ram_lock);
+
+	tscs42xx->coeff_ram_synced = false;
+
+	memcpy(&tscs42xx->coeff_ram[ctl->addr * COEFF_SIZE],
+		ucontrol->value.bytes.data, params->max);
+
+	mutex_lock(&tscs42xx->pll_lock);
+
+	if (plls_locked(codec)) {
+		ret = write_coeff_ram(codec, tscs42xx->coeff_ram,
+			ctl->addr, coeff_cnt);
+		if (ret < 0) {
+			dev_err(codec->dev,
+				"Failed to flush coeff ram cache (%d)\n", ret);
+			goto exit;
+		}
+		tscs42xx->coeff_ram_synced = true;
+	}
+
+	ret = 0;
+exit:
+	mutex_unlock(&tscs42xx->pll_lock);
+
+	mutex_unlock(&tscs42xx->coeff_ram_lock);
+
+	return ret;
+}
+
+/* Input L Capture Route */
+static char const * const input_select_text[] = {
+	"Line 1", "Line 2", "Line 3", "D2S"
+};
+
+static const struct soc_enum left_input_select_enum =
+SOC_ENUM_SINGLE(R_INSELL, FB_INSELL, ARRAY_SIZE(input_select_text),
+		input_select_text);
+
+static const struct snd_kcontrol_new left_input_select =
+SOC_DAPM_ENUM("LEFT_INPUT_SELECT_ENUM", left_input_select_enum);
+
+/* Input R Capture Route */
+static const struct soc_enum right_input_select_enum =
+SOC_ENUM_SINGLE(R_INSELR, FB_INSELR, ARRAY_SIZE(input_select_text),
+		input_select_text);
+
+static const struct snd_kcontrol_new right_input_select =
+SOC_DAPM_ENUM("RIGHT_INPUT_SELECT_ENUM", right_input_select_enum);
+
+/* Input Channel Mapping */
+static char const * const ch_map_select_text[] = {
+	"Normal", "Left to Right", "Right to Left", "Swap"
+};
+
+static const struct soc_enum ch_map_select_enum =
+SOC_ENUM_SINGLE(R_AIC2, FB_AIC2_ADCDSEL, ARRAY_SIZE(ch_map_select_text),
+		ch_map_select_text);
+
+static int dapm_vref_event(struct snd_soc_dapm_widget *w,
+			 struct snd_kcontrol *kcontrol, int event)
+{
+	msleep(20);
+	return 0;
+}
+
+static int dapm_micb_event(struct snd_soc_dapm_widget *w,
+			 struct snd_kcontrol *kcontrol, int event)
+{
+	msleep(20);
+	return 0;
+}
+
+static int pll_event(struct snd_soc_dapm_widget *w,
+		   struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	int ret;
+
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		ret = power_up_audio_plls(codec);
+	else
+		ret = power_down_audio_plls(codec);
+
+	return ret;
+}
+
+static int dac_event(struct snd_soc_dapm_widget *w,
+		   struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+	int ret;
+
+	mutex_lock(&tscs42xx->coeff_ram_lock);
+
+	if (!tscs42xx->coeff_ram_synced) {
+		ret = write_coeff_ram(codec, tscs42xx->coeff_ram, 0x00,
+			COEFF_RAM_COEFF_COUNT);
+		if (ret < 0)
+			goto exit;
+		tscs42xx->coeff_ram_synced = true;
+	}
+
+	ret = 0;
+exit:
+	mutex_unlock(&tscs42xx->coeff_ram_lock);
+
+	return ret;
+}
+
+static const struct snd_soc_dapm_widget tscs42xx_dapm_widgets[] = {
+	/* Vref */
+	SND_SOC_DAPM_SUPPLY_S("Vref", 1, R_PWRM2, FB_PWRM2_VREF, 0,
+		dapm_vref_event, SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_PRE_PMD),
+
+	/* PLL */
+	SND_SOC_DAPM_SUPPLY("PLL", SND_SOC_NOPM, 0, 0, pll_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	/* Headphone */
+	SND_SOC_DAPM_DAC_E("DAC L", "HiFi Playback", R_PWRM2, FB_PWRM2_HPL, 0,
+			dac_event, SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_DAC_E("DAC R", "HiFi Playback", R_PWRM2, FB_PWRM2_HPR, 0,
+			dac_event, SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_OUTPUT("Headphone L"),
+	SND_SOC_DAPM_OUTPUT("Headphone R"),
+
+	/* Speaker */
+	SND_SOC_DAPM_DAC_E("ClassD L", "HiFi Playback",
+		R_PWRM2, FB_PWRM2_SPKL, 0,
+		dac_event, SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_DAC_E("ClassD R", "HiFi Playback",
+		R_PWRM2, FB_PWRM2_SPKR, 0,
+		dac_event, SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_OUTPUT("Speaker L"),
+	SND_SOC_DAPM_OUTPUT("Speaker R"),
+
+	/* Capture */
+	SND_SOC_DAPM_PGA("Analog In PGA L", R_PWRM1, FB_PWRM1_PGAL, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("Analog In PGA R", R_PWRM1, FB_PWRM1_PGAR, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("Analog Boost L", R_PWRM1, FB_PWRM1_BSTL, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("Analog Boost R", R_PWRM1, FB_PWRM1_BSTR, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("ADC Mute", R_CNVRTR0, FB_CNVRTR0_HPOR, true, NULL, 0),
+	SND_SOC_DAPM_ADC("ADC L", "HiFi Capture", R_PWRM1, FB_PWRM1_ADCL, 0),
+	SND_SOC_DAPM_ADC("ADC R", "HiFi Capture", R_PWRM1, FB_PWRM1_ADCR, 0),
+
+	/* Capture Input */
+	SND_SOC_DAPM_MUX("Input L Capture Route", R_PWRM2,
+			FB_PWRM2_INSELL, 0, &left_input_select),
+	SND_SOC_DAPM_MUX("Input R Capture Route", R_PWRM2,
+			FB_PWRM2_INSELR, 0, &right_input_select),
+
+	/* Digital Mic */
+	SND_SOC_DAPM_SUPPLY_S("Digital Mic Enable", 2, R_DMICCTL,
+		FB_DMICCTL_DMICEN, 0, NULL,
+		SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_PRE_PMD),
+
+	/* Analog Mic */
+	SND_SOC_DAPM_SUPPLY_S("Mic Bias", 2, R_PWRM1, FB_PWRM1_MICB,
+		0, dapm_micb_event, SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_PRE_PMD),
+
+	/* Line In */
+	SND_SOC_DAPM_INPUT("Line In 1 L"),
+	SND_SOC_DAPM_INPUT("Line In 1 R"),
+	SND_SOC_DAPM_INPUT("Line In 2 L"),
+	SND_SOC_DAPM_INPUT("Line In 2 R"),
+	SND_SOC_DAPM_INPUT("Line In 3 L"),
+	SND_SOC_DAPM_INPUT("Line In 3 R"),
+};
+
+static const struct snd_soc_dapm_route tscs42xx_intercon[] = {
+	{"DAC L", NULL, "PLL"},
+	{"DAC R", NULL, "PLL"},
+	{"DAC L", NULL, "Vref"},
+	{"DAC R", NULL, "Vref"},
+	{"Headphone L", NULL, "DAC L"},
+	{"Headphone R", NULL, "DAC R"},
+
+	{"ClassD L", NULL, "PLL"},
+	{"ClassD R", NULL, "PLL"},
+	{"ClassD L", NULL, "Vref"},
+	{"ClassD R", NULL, "Vref"},
+	{"Speaker L", NULL, "ClassD L"},
+	{"Speaker R", NULL, "ClassD R"},
+
+	{"Input L Capture Route", NULL, "Vref"},
+	{"Input R Capture Route", NULL, "Vref"},
+
+	{"Mic Bias", NULL, "Vref"},
+
+	{"Input L Capture Route", "Line 1", "Line In 1 L"},
+	{"Input R Capture Route", "Line 1", "Line In 1 R"},
+	{"Input L Capture Route", "Line 2", "Line In 2 L"},
+	{"Input R Capture Route", "Line 2", "Line In 2 R"},
+	{"Input L Capture Route", "Line 3", "Line In 3 L"},
+	{"Input R Capture Route", "Line 3", "Line In 3 R"},
+
+	{"Analog In PGA L", NULL, "Input L Capture Route"},
+	{"Analog In PGA R", NULL, "Input R Capture Route"},
+	{"Analog Boost L", NULL, "Analog In PGA L"},
+	{"Analog Boost R", NULL, "Analog In PGA R"},
+	{"ADC Mute", NULL, "Analog Boost L"},
+	{"ADC Mute", NULL, "Analog Boost R"},
+	{"ADC L", NULL, "PLL"},
+	{"ADC R", NULL, "PLL"},
+	{"ADC L", NULL, "ADC Mute"},
+	{"ADC R", NULL, "ADC Mute"},
+};
+
+/************
+ * CONTROLS *
+ ************/
+
+static char const * const eq_band_enable_text[] = {
+	"Prescale only",
+	"Band1",
+	"Band1:2",
+	"Band1:3",
+	"Band1:4",
+	"Band1:5",
+	"Band1:6",
+};
+
+static char const * const level_detection_text[] = {
+	"Average",
+	"Peak",
+};
+
+static char const * const level_detection_window_text[] = {
+	"512 Samples",
+	"64 Samples",
+};
+
+static char const * const compressor_ratio_text[] = {
+	"Reserved", "1.5:1", "2:1", "3:1", "4:1", "5:1", "6:1",
+	"7:1", "8:1", "9:1", "10:1", "11:1", "12:1", "13:1", "14:1",
+	"15:1", "16:1", "17:1", "18:1", "19:1", "20:1",
+};
+
+static DECLARE_TLV_DB_SCALE(hpvol_scale, -8850, 75, 0);
+static DECLARE_TLV_DB_SCALE(spkvol_scale, -7725, 75, 0);
+static DECLARE_TLV_DB_SCALE(dacvol_scale, -9563, 38, 0);
+static DECLARE_TLV_DB_SCALE(adcvol_scale, -7125, 38, 0);
+static DECLARE_TLV_DB_SCALE(invol_scale, -1725, 75, 0);
+static DECLARE_TLV_DB_SCALE(mic_boost_scale, 0, 1000, 0);
+static DECLARE_TLV_DB_MINMAX(mugain_scale, 0, 4650);
+static DECLARE_TLV_DB_MINMAX(compth_scale, -9562, 0);
+
+static const struct soc_enum eq1_band_enable_enum =
+	SOC_ENUM_SINGLE(R_CONFIG1, FB_CONFIG1_EQ1_BE,
+		ARRAY_SIZE(eq_band_enable_text), eq_band_enable_text);
+
+static const struct soc_enum eq2_band_enable_enum =
+	SOC_ENUM_SINGLE(R_CONFIG1, FB_CONFIG1_EQ2_BE,
+		ARRAY_SIZE(eq_band_enable_text), eq_band_enable_text);
+
+static const struct soc_enum cle_level_detection_enum =
+	SOC_ENUM_SINGLE(R_CLECTL, FB_CLECTL_LVL_MODE,
+		ARRAY_SIZE(level_detection_text),
+		level_detection_text);
+
+static const struct soc_enum cle_level_detection_window_enum =
+	SOC_ENUM_SINGLE(R_CLECTL, FB_CLECTL_WINDOWSEL,
+		ARRAY_SIZE(level_detection_window_text),
+		level_detection_window_text);
+
+static const struct soc_enum mbc_level_detection_enums[] = {
+	SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_LVLMODE1,
+		ARRAY_SIZE(level_detection_text),
+			level_detection_text),
+	SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_LVLMODE2,
+		ARRAY_SIZE(level_detection_text),
+			level_detection_text),
+	SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_LVLMODE3,
+		ARRAY_SIZE(level_detection_text),
+			level_detection_text),
+};
+
+static const struct soc_enum mbc_level_detection_window_enums[] = {
+	SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_WINSEL1,
+		ARRAY_SIZE(level_detection_window_text),
+			level_detection_window_text),
+	SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_WINSEL2,
+		ARRAY_SIZE(level_detection_window_text),
+			level_detection_window_text),
+	SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_WINSEL3,
+		ARRAY_SIZE(level_detection_window_text),
+			level_detection_window_text),
+};
+
+static const struct soc_enum compressor_ratio_enum =
+	SOC_ENUM_SINGLE(R_CMPRAT, FB_CMPRAT,
+		ARRAY_SIZE(compressor_ratio_text), compressor_ratio_text);
+
+static const struct soc_enum dac_mbc1_compressor_ratio_enum =
+	SOC_ENUM_SINGLE(R_DACMBCRAT1, FB_DACMBCRAT1_RATIO,
+		ARRAY_SIZE(compressor_ratio_text), compressor_ratio_text);
+
+static const struct soc_enum dac_mbc2_compressor_ratio_enum =
+	SOC_ENUM_SINGLE(R_DACMBCRAT2, FB_DACMBCRAT2_RATIO,
+		ARRAY_SIZE(compressor_ratio_text), compressor_ratio_text);
+
+static const struct soc_enum dac_mbc3_compressor_ratio_enum =
+	SOC_ENUM_SINGLE(R_DACMBCRAT3, FB_DACMBCRAT3_RATIO,
+		ARRAY_SIZE(compressor_ratio_text), compressor_ratio_text);
+
+static int bytes_info_ext(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_info *ucontrol)
+{
+	struct coeff_ram_ctl *ctl =
+		(struct coeff_ram_ctl *)kcontrol->private_value;
+	struct soc_bytes_ext *params = &ctl->bytes_ext;
+
+	ucontrol->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+	ucontrol->count = params->max;
+
+	return 0;
+}
+
+#define COEFF_RAM_CTL(xname, xcount, xaddr) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = bytes_info_ext, \
+	.get = coeff_ram_get, .put = coeff_ram_put, \
+	.private_value = (unsigned long)&(struct coeff_ram_ctl) { \
+		.addr = xaddr, \
+		.bytes_ext = {.max = xcount, }, \
+	} \
+}
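+
+/*
+ * Example expansion: COEFF_RAM_CTL("Cascade1L BiQuad1", BIQUAD_SIZE, 0x00)
+ * creates a 15-byte bytes control (5 coefficients * 3 bytes each) covering
+ * coefficient addresses 0x00-0x04, which is why the next biquad in the
+ * list starts at 0x05.
+ */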
+
+static const struct snd_kcontrol_new tscs42xx_snd_controls[] = {
+	/* Volumes */
+	SOC_DOUBLE_R_TLV("Headphone Playback Volume", R_HPVOLL, R_HPVOLR,
+			FB_HPVOLL, 0x7F, 0, hpvol_scale),
+	SOC_DOUBLE_R_TLV("Speaker Playback Volume", R_SPKVOLL, R_SPKVOLR,
+			FB_SPKVOLL, 0x7F, 0, spkvol_scale),
+	SOC_DOUBLE_R_TLV("Master Playback Volume", R_DACVOLL, R_DACVOLR,
+			FB_DACVOLL, 0xFF, 0, dacvol_scale),
+	SOC_DOUBLE_R_TLV("PCM Capture Volume", R_ADCVOLL, R_ADCVOLR,
+			FB_ADCVOLL, 0xFF, 0, adcvol_scale),
+	SOC_DOUBLE_R_TLV("Master Capture Volume", R_INVOLL, R_INVOLR,
+			FB_INVOLL, 0x3F, 0, invol_scale),
+
+	/* INSEL */
+	SOC_DOUBLE_R_TLV("Mic Boost Capture Volume", R_INSELL, R_INSELR,
+			FB_INSELL_MICBSTL, FV_INSELL_MICBSTL_30DB,
+			0, mic_boost_scale),
+
+	/* Input Channel Map */
+	SOC_ENUM("Input Channel Map Switch", ch_map_select_enum),
+
+	/* Coefficient Ram */
+	COEFF_RAM_CTL("Cascade1L BiQuad1", BIQUAD_SIZE, 0x00),
+	COEFF_RAM_CTL("Cascade1L BiQuad2", BIQUAD_SIZE, 0x05),
+	COEFF_RAM_CTL("Cascade1L BiQuad3", BIQUAD_SIZE, 0x0a),
+	COEFF_RAM_CTL("Cascade1L BiQuad4", BIQUAD_SIZE, 0x0f),
+	COEFF_RAM_CTL("Cascade1L BiQuad5", BIQUAD_SIZE, 0x14),
+	COEFF_RAM_CTL("Cascade1L BiQuad6", BIQUAD_SIZE, 0x19),
+
+	COEFF_RAM_CTL("Cascade1R BiQuad1", BIQUAD_SIZE, 0x20),
+	COEFF_RAM_CTL("Cascade1R BiQuad2", BIQUAD_SIZE, 0x25),
+	COEFF_RAM_CTL("Cascade1R BiQuad3", BIQUAD_SIZE, 0x2a),
+	COEFF_RAM_CTL("Cascade1R BiQuad4", BIQUAD_SIZE, 0x2f),
+	COEFF_RAM_CTL("Cascade1R BiQuad5", BIQUAD_SIZE, 0x34),
+	COEFF_RAM_CTL("Cascade1R BiQuad6", BIQUAD_SIZE, 0x39),
+
+	COEFF_RAM_CTL("Cascade1L Prescale", COEFF_SIZE, 0x1f),
+	COEFF_RAM_CTL("Cascade1R Prescale", COEFF_SIZE, 0x3f),
+
+	COEFF_RAM_CTL("Cascade2L BiQuad1", BIQUAD_SIZE, 0x40),
+	COEFF_RAM_CTL("Cascade2L BiQuad2", BIQUAD_SIZE, 0x45),
+	COEFF_RAM_CTL("Cascade2L BiQuad3", BIQUAD_SIZE, 0x4a),
+	COEFF_RAM_CTL("Cascade2L BiQuad4", BIQUAD_SIZE, 0x4f),
+	COEFF_RAM_CTL("Cascade2L BiQuad5", BIQUAD_SIZE, 0x54),
+	COEFF_RAM_CTL("Cascade2L BiQuad6", BIQUAD_SIZE, 0x59),
+
+	COEFF_RAM_CTL("Cascade2R BiQuad1", BIQUAD_SIZE, 0x60),
+	COEFF_RAM_CTL("Cascade2R BiQuad2", BIQUAD_SIZE, 0x65),
+	COEFF_RAM_CTL("Cascade2R BiQuad3", BIQUAD_SIZE, 0x6a),
+	COEFF_RAM_CTL("Cascade2R BiQuad4", BIQUAD_SIZE, 0x6f),
+	COEFF_RAM_CTL("Cascade2R BiQuad5", BIQUAD_SIZE, 0x74),
+	COEFF_RAM_CTL("Cascade2R BiQuad6", BIQUAD_SIZE, 0x79),
+
+	COEFF_RAM_CTL("Cascade2L Prescale", COEFF_SIZE, 0x5f),
+	COEFF_RAM_CTL("Cascade2R Prescale", COEFF_SIZE, 0x7f),
+
+	COEFF_RAM_CTL("Bass Extraction BiQuad1", BIQUAD_SIZE, 0x80),
+	COEFF_RAM_CTL("Bass Extraction BiQuad2", BIQUAD_SIZE, 0x85),
+
+	COEFF_RAM_CTL("Bass Non Linear Function 1", COEFF_SIZE, 0x8a),
+	COEFF_RAM_CTL("Bass Non Linear Function 2", COEFF_SIZE, 0x8b),
+
+	COEFF_RAM_CTL("Bass Limiter BiQuad", BIQUAD_SIZE, 0x8c),
+
+	COEFF_RAM_CTL("Bass Cut Off BiQuad", BIQUAD_SIZE, 0x91),
+
+	COEFF_RAM_CTL("Bass Mix", COEFF_SIZE, 0x96),
+
+	COEFF_RAM_CTL("Treb Extraction BiQuad1", BIQUAD_SIZE, 0x97),
+	COEFF_RAM_CTL("Treb Extraction BiQuad2", BIQUAD_SIZE, 0x9c),
+
+	COEFF_RAM_CTL("Treb Non Linear Function 1", COEFF_SIZE, 0xa1),
+	COEFF_RAM_CTL("Treb Non Linear Function 2", COEFF_SIZE, 0xa2),
+
+	COEFF_RAM_CTL("Treb Limiter BiQuad", BIQUAD_SIZE, 0xa3),
+
+	COEFF_RAM_CTL("Treb Cut Off BiQuad", BIQUAD_SIZE, 0xa8),
+
+	COEFF_RAM_CTL("Treb Mix", COEFF_SIZE, 0xad),
+
+	COEFF_RAM_CTL("3D", COEFF_SIZE, 0xae),
+
+	COEFF_RAM_CTL("3D Mix", COEFF_SIZE, 0xaf),
+
+	COEFF_RAM_CTL("MBC1 BiQuad1", BIQUAD_SIZE, 0xb0),
+	COEFF_RAM_CTL("MBC1 BiQuad2", BIQUAD_SIZE, 0xb5),
+
+	COEFF_RAM_CTL("MBC2 BiQuad1", BIQUAD_SIZE, 0xba),
+	COEFF_RAM_CTL("MBC2 BiQuad2", BIQUAD_SIZE, 0xbf),
+
+	COEFF_RAM_CTL("MBC3 BiQuad1", BIQUAD_SIZE, 0xc4),
+	COEFF_RAM_CTL("MBC3 BiQuad2", BIQUAD_SIZE, 0xc9),
+
+	/* EQ */
+	SOC_SINGLE("EQ1 Switch", R_CONFIG1, FB_CONFIG1_EQ1_EN, 1, 0),
+	SOC_SINGLE("EQ2 Switch", R_CONFIG1, FB_CONFIG1_EQ2_EN, 1, 0),
+	SOC_ENUM("EQ1 Band Enable Switch", eq1_band_enable_enum),
+	SOC_ENUM("EQ2 Band Enable Switch", eq2_band_enable_enum),
+
+	/* CLE */
+	SOC_ENUM("CLE Level Detect Switch",
+		cle_level_detection_enum),
+	SOC_ENUM("CLE Level Detect Win Switch",
+		cle_level_detection_window_enum),
+	SOC_SINGLE("Expander Switch",
+		R_CLECTL, FB_CLECTL_EXP_EN, 1, 0),
+	SOC_SINGLE("Limiter Switch",
+		R_CLECTL, FB_CLECTL_LIMIT_EN, 1, 0),
+	SOC_SINGLE("Comp Switch",
+		R_CLECTL, FB_CLECTL_COMP_EN, 1, 0),
+	SOC_SINGLE_TLV("CLE Make-Up Gain Playback Volume",
+		R_MUGAIN, FB_MUGAIN_CLEMUG, 0x1f, 0, mugain_scale),
+	SOC_SINGLE_TLV("Comp Thresh Playback Volume",
+		R_COMPTH, FB_COMPTH, 0xff, 0, compth_scale),
+	SOC_ENUM("Comp Ratio Switch", compressor_ratio_enum),
+	SND_SOC_BYTES("Comp Atk Time", R_CATKTCL, 2),
+
+	/* Effects */
+	SOC_SINGLE("3D Switch", R_FXCTL, FB_FXCTL_3DEN, 1, 0),
+	SOC_SINGLE("Treble Switch", R_FXCTL, FB_FXCTL_TEEN, 1, 0),
+	SOC_SINGLE("Treble Bypass Switch", R_FXCTL, FB_FXCTL_TNLFBYPASS, 1, 0),
+	SOC_SINGLE("Bass Switch", R_FXCTL, FB_FXCTL_BEEN, 1, 0),
+	SOC_SINGLE("Bass Bypass Switch", R_FXCTL, FB_FXCTL_BNLFBYPASS, 1, 0),
+
+	/* MBC */
+	SOC_SINGLE("MBC Band1 Switch", R_DACMBCEN, FB_DACMBCEN_MBCEN1, 1, 0),
+	SOC_SINGLE("MBC Band2 Switch", R_DACMBCEN, FB_DACMBCEN_MBCEN2, 1, 0),
+	SOC_SINGLE("MBC Band3 Switch", R_DACMBCEN, FB_DACMBCEN_MBCEN3, 1, 0),
+	SOC_ENUM("MBC Band1 Level Detect Switch",
+		mbc_level_detection_enums[0]),
+	SOC_ENUM("MBC Band2 Level Detect Switch",
+		mbc_level_detection_enums[1]),
+	SOC_ENUM("MBC Band3 Level Detect Switch",
+		mbc_level_detection_enums[2]),
+	SOC_ENUM("MBC Band1 Level Detect Win Switch",
+		mbc_level_detection_window_enums[0]),
+	SOC_ENUM("MBC Band2 Level Detect Win Switch",
+		mbc_level_detection_window_enums[1]),
+	SOC_ENUM("MBC Band3 Level Detect Win Switch",
+		mbc_level_detection_window_enums[2]),
+
+	SOC_SINGLE("MBC1 Phase Invert", R_DACMBCMUG1, FB_DACMBCMUG1_PHASE,
+		1, 0),
+	SOC_SINGLE_TLV("DAC MBC1 Make-Up Gain Playback Volume",
+		R_DACMBCMUG1, FB_DACMBCMUG1_MUGAIN, 0x1f, 0, mugain_scale),
+	SOC_SINGLE_TLV("DAC MBC1 Comp Thresh Playback Volume",
+		R_DACMBCTHR1, FB_DACMBCTHR1_THRESH, 0xff, 0, compth_scale),
+	SOC_ENUM("DAC MBC1 Comp Ratio Switch",
+		dac_mbc1_compressor_ratio_enum),
+	SND_SOC_BYTES("DAC MBC1 Comp Atk Time", R_DACMBCATK1L, 2),
+	SND_SOC_BYTES("DAC MBC1 Comp Rel Time Const",
+		R_DACMBCREL1L, 2),
+
+	SOC_SINGLE("MBC2 Phase Invert", R_DACMBCMUG2, FB_DACMBCMUG2_PHASE,
+		1, 0),
+	SOC_SINGLE_TLV("DAC MBC2 Make-Up Gain Playback Volume",
+		R_DACMBCMUG2, FB_DACMBCMUG2_MUGAIN, 0x1f, 0, mugain_scale),
+	SOC_SINGLE_TLV("DAC MBC2 Comp Thresh Playback Volume",
+		R_DACMBCTHR2, FB_DACMBCTHR2_THRESH, 0xff, 0, compth_scale),
+	SOC_ENUM("DAC MBC2 Comp Ratio Switch",
+		dac_mbc2_compressor_ratio_enum),
+	SND_SOC_BYTES("DAC MBC2 Comp Atk Time", R_DACMBCATK2L, 2),
+	SND_SOC_BYTES("DAC MBC2 Comp Rel Time Const",
+		R_DACMBCREL2L, 2),
+
+	SOC_SINGLE("MBC3 Phase Invert", R_DACMBCMUG3, FB_DACMBCMUG3_PHASE,
+		1, 0),
+	SOC_SINGLE_TLV("DAC MBC3 Make-Up Gain Playback Volume",
+		R_DACMBCMUG3, FB_DACMBCMUG3_MUGAIN, 0x1f, 0, mugain_scale),
+	SOC_SINGLE_TLV("DAC MBC3 Comp Thresh Playback Volume",
+		R_DACMBCTHR3, FB_DACMBCTHR3_THRESH, 0xff, 0, compth_scale),
+	SOC_ENUM("DAC MBC3 Comp Ratio Switch",
+		dac_mbc3_compressor_ratio_enum),
+	SND_SOC_BYTES("DAC MBC3 Comp Atk Time", R_DACMBCATK3L, 2),
+	SND_SOC_BYTES("DAC MBC3 Comp Rel Time Const",
+		R_DACMBCREL3L, 2),
+};
+
+static int setup_sample_format(struct snd_soc_codec *codec,
+		snd_pcm_format_t format)
+{
+	unsigned int width;
+	int ret;
+
+	switch (format) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		width = RV_AIC1_WL_16;
+		break;
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		width = RV_AIC1_WL_20;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		width = RV_AIC1_WL_24;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		width = RV_AIC1_WL_32;
+		break;
+	default:
+		ret = -EINVAL;
+		dev_err(codec->dev, "Unsupported format width (%d)\n", ret);
+		return ret;
+	}
+	ret = snd_soc_update_bits(codec, R_AIC1, RM_AIC1_WL, width);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set sample width (%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
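+/*
+ * The sample rate is programmed as a base rate (DBR: 32/44.1/48 kHz) times
+ * a multiplier (DBM: 0.25/0.5/1/2), e.g. 8 kHz is 32 kHz * 0.25 and
+ * 88.2 kHz is 44.1 kHz * 2; the same settings are mirrored into the ADC
+ * sample-rate register since both converters share the clocks.
+ */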
+static int setup_sample_rate(struct snd_soc_codec *codec, unsigned int rate)
+{
+	struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+	unsigned int br, bm;
+	int ret;
+
+	switch (rate) {
+	case 8000:
+		br = RV_DACSR_DBR_32;
+		bm = RV_DACSR_DBM_PT25;
+		break;
+	case 16000:
+		br = RV_DACSR_DBR_32;
+		bm = RV_DACSR_DBM_PT5;
+		break;
+	case 24000:
+		br = RV_DACSR_DBR_48;
+		bm = RV_DACSR_DBM_PT5;
+		break;
+	case 32000:
+		br = RV_DACSR_DBR_32;
+		bm = RV_DACSR_DBM_1;
+		break;
+	case 48000:
+		br = RV_DACSR_DBR_48;
+		bm = RV_DACSR_DBM_1;
+		break;
+	case 96000:
+		br = RV_DACSR_DBR_48;
+		bm = RV_DACSR_DBM_2;
+		break;
+	case 11025:
+		br = RV_DACSR_DBR_44_1;
+		bm = RV_DACSR_DBM_PT25;
+		break;
+	case 22050:
+		br = RV_DACSR_DBR_44_1;
+		bm = RV_DACSR_DBM_PT5;
+		break;
+	case 44100:
+		br = RV_DACSR_DBR_44_1;
+		bm = RV_DACSR_DBM_1;
+		break;
+	case 88200:
+		br = RV_DACSR_DBR_44_1;
+		bm = RV_DACSR_DBM_2;
+		break;
+	default:
+		dev_err(codec->dev, "Unsupported sample rate %d\n", rate);
+		return -EINVAL;
+	}
+
+	/* DAC and ADC share bit and frame clock */
+	ret = snd_soc_update_bits(codec, R_DACSR, RM_DACSR_DBR, br);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to update register (%d)\n", ret);
+		return ret;
+	}
+	ret = snd_soc_update_bits(codec, R_DACSR, RM_DACSR_DBM, bm);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to update register (%d)\n", ret);
+		return ret;
+	}
+	ret = snd_soc_update_bits(codec, R_ADCSR, RM_DACSR_DBR, br);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to update register (%d)\n", ret);
+		return ret;
+	}
+	ret = snd_soc_update_bits(codec, R_ADCSR, RM_DACSR_DBM, bm);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to update register (%d)\n", ret);
+		return ret;
+	}
+
+	mutex_lock(&tscs42xx->audio_params_lock);
+
+	tscs42xx->samplerate = rate;
+
+	mutex_unlock(&tscs42xx->audio_params_lock);
+
+	return 0;
+}
+
+struct reg_setting {
+	unsigned int addr;
+	unsigned int val;
+	unsigned int mask;
+};
+
+#define PLL_REG_SETTINGS_COUNT 13
+struct pll_ctl {
+	int input_freq;
+	struct reg_setting settings[PLL_REG_SETTINGS_COUNT];
+};
+
+#define PLL_CTL(f, rt, rd, r1b_l, r9, ra, rb,		\
+		rc, r12, r1b_h, re, rf, r10, r11)	\
+	{						\
+		.input_freq = f,			\
+		.settings = {				\
+			{R_TIMEBASE,  rt,   0xFF},	\
+			{R_PLLCTLD,   rd,   0xFF},	\
+			{R_PLLCTL1B, r1b_l, 0x0F},	\
+			{R_PLLCTL9,   r9,   0xFF},	\
+			{R_PLLCTLA,   ra,   0xFF},	\
+			{R_PLLCTLB,   rb,   0xFF},	\
+			{R_PLLCTLC,   rc,   0xFF},	\
+			{R_PLLCTL12, r12,   0xFF},	\
+			{R_PLLCTL1B, r1b_h, 0xF0},	\
+			{R_PLLCTLE,   re,   0xFF},	\
+			{R_PLLCTLF,   rf,   0xFF},	\
+			{R_PLLCTL10, r10,   0xFF},	\
+			{R_PLLCTL11, r11,   0xFF},	\
+		},					\
+	}
+
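+/*
+ * Each entry below is the 13-register sequence (PLL_REG_SETTINGS_COUNT)
+ * that configures both PLLs for one supported reference frequency;
+ * set_pll_ctl_from_input_freq() looks up the entry matching the frequency
+ * passed to set_sysclk() and applies it with snd_soc_update_bits().
+ */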
+static const struct pll_ctl pll_ctls[] = {
+	PLL_CTL(1411200, 0x05,
+		0x39, 0x04, 0x07, 0x02, 0xC3, 0x04,
+		0x1B, 0x10, 0x03, 0x03, 0xD0, 0x02),
+	PLL_CTL(1536000, 0x05,
+		0x1A, 0x04, 0x02, 0x03, 0xE0, 0x01,
+		0x1A, 0x10, 0x02, 0x03, 0xB9, 0x01),
+	PLL_CTL(2822400, 0x0A,
+		0x23, 0x04, 0x07, 0x04, 0xC3, 0x04,
+		0x22, 0x10, 0x05, 0x03, 0x58, 0x02),
+	PLL_CTL(3072000, 0x0B,
+		0x22, 0x04, 0x07, 0x03, 0x48, 0x03,
+		0x1A, 0x10, 0x04, 0x03, 0xB9, 0x01),
+	PLL_CTL(5644800, 0x15,
+		0x23, 0x04, 0x0E, 0x04, 0xC3, 0x04,
+		0x1A, 0x10, 0x08, 0x03, 0xE0, 0x01),
+	PLL_CTL(6144000, 0x17,
+		0x1A, 0x04, 0x08, 0x03, 0xE0, 0x01,
+		0x1A, 0x10, 0x08, 0x03, 0xB9, 0x01),
+	PLL_CTL(12000000, 0x2E,
+		0x1B, 0x04, 0x19, 0x03, 0x00, 0x03,
+		0x2A, 0x10, 0x19, 0x05, 0x98, 0x04),
+	PLL_CTL(19200000, 0x4A,
+		0x13, 0x04, 0x14, 0x03, 0x80, 0x01,
+		0x1A, 0x10, 0x19, 0x03, 0xB9, 0x01),
+	PLL_CTL(22000000, 0x55,
+		0x2A, 0x04, 0x37, 0x05, 0x00, 0x06,
+		0x22, 0x10, 0x26, 0x03, 0x49, 0x02),
+	PLL_CTL(22579200, 0x57,
+		0x22, 0x04, 0x31, 0x03, 0x20, 0x03,
+		0x1A, 0x10, 0x1D, 0x03, 0xB3, 0x01),
+	PLL_CTL(24000000, 0x5D,
+		0x13, 0x04, 0x19, 0x03, 0x80, 0x01,
+		0x1B, 0x10, 0x19, 0x05, 0x4C, 0x02),
+	PLL_CTL(24576000, 0x5F,
+		0x13, 0x04, 0x1D, 0x03, 0xB3, 0x01,
+		0x22, 0x10, 0x40, 0x03, 0x72, 0x03),
+	PLL_CTL(27000000, 0x68,
+		0x22, 0x04, 0x4B, 0x03, 0x00, 0x04,
+		0x2A, 0x10, 0x7D, 0x03, 0x20, 0x06),
+	PLL_CTL(36000000, 0x8C,
+		0x1B, 0x04, 0x4B, 0x03, 0x00, 0x03,
+		0x2A, 0x10, 0x7D, 0x03, 0x98, 0x04),
+	PLL_CTL(25000000, 0x61,
+		0x1B, 0x04, 0x37, 0x03, 0x2B, 0x03,
+		0x1A, 0x10, 0x2A, 0x03, 0x39, 0x02),
+	PLL_CTL(26000000, 0x65,
+		0x23, 0x04, 0x41, 0x05, 0x00, 0x06,
+		0x1A, 0x10, 0x26, 0x03, 0xEF, 0x01),
+	PLL_CTL(12288000, 0x2F,
+		0x1A, 0x04, 0x12, 0x03, 0x1C, 0x02,
+		0x22, 0x10, 0x20, 0x03, 0x72, 0x03),
+	PLL_CTL(40000000, 0x9B,
+		0x22, 0x08, 0x7D, 0x03, 0x80, 0x04,
+		0x23, 0x10, 0x7D, 0x05, 0xE4, 0x06),
+	PLL_CTL(512000, 0x01,
+		0x22, 0x04, 0x01, 0x03, 0xD0, 0x02,
+		0x1B, 0x10, 0x01, 0x04, 0x72, 0x03),
+	PLL_CTL(705600, 0x02,
+		0x22, 0x04, 0x02, 0x03, 0x15, 0x04,
+		0x22, 0x10, 0x01, 0x04, 0x80, 0x02),
+	PLL_CTL(1024000, 0x03,
+		0x22, 0x04, 0x02, 0x03, 0xD0, 0x02,
+		0x1B, 0x10, 0x02, 0x04, 0x72, 0x03),
+	PLL_CTL(2048000, 0x07,
+		0x22, 0x04, 0x04, 0x03, 0xD0, 0x02,
+		0x1B, 0x10, 0x04, 0x04, 0x72, 0x03),
+	PLL_CTL(2400000, 0x08,
+		0x22, 0x04, 0x05, 0x03, 0x00, 0x03,
+		0x23, 0x10, 0x05, 0x05, 0x98, 0x04),
+};
+
+static const struct pll_ctl *get_pll_ctl(int input_freq)
+{
+	int i;
+	const struct pll_ctl *pll_ctl = NULL;
+
+	for (i = 0; i < ARRAY_SIZE(pll_ctls); ++i)
+		if (input_freq == pll_ctls[i].input_freq) {
+			pll_ctl = &pll_ctls[i];
+			break;
+		}
+
+	return pll_ctl;
+}
+
+static int set_pll_ctl_from_input_freq(struct snd_soc_codec *codec,
+		const int input_freq)
+{
+	int ret;
+	int i;
+	const struct pll_ctl *pll_ctl;
+
+	pll_ctl = get_pll_ctl(input_freq);
+	if (!pll_ctl) {
+		ret = -EINVAL;
+		dev_err(codec->dev, "No PLL input entry for %d (%d)\n",
+			input_freq, ret);
+		return ret;
+	}
+
+	for (i = 0; i < PLL_REG_SETTINGS_COUNT; ++i) {
+		ret = snd_soc_update_bits(codec,
+			pll_ctl->settings[i].addr,
+			pll_ctl->settings[i].mask,
+			pll_ctl->settings[i].val);
+		if (ret < 0) {
+			dev_err(codec->dev, "Failed to set pll ctl (%d)\n",
+				ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int tscs42xx_hw_params(struct snd_pcm_substream *substream,
+		struct snd_pcm_hw_params *params,
+		struct snd_soc_dai *codec_dai)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	int ret;
+
+	ret = setup_sample_format(codec, params_format(params));
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to setup sample format (%d)\n",
+			ret);
+		return ret;
+	}
+
+	ret = setup_sample_rate(codec, params_rate(params));
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to setup sample rate (%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static inline int dac_mute(struct snd_soc_codec *codec)
+{
+	int ret;
+
+	ret = snd_soc_update_bits(codec, R_CNVRTR1, RM_CNVRTR1_DACMU,
+		RV_CNVRTR1_DACMU_ENABLE);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to mute DAC (%d)\n",
+				ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static inline int dac_unmute(struct snd_soc_codec *codec)
+{
+	int ret;
+
+	ret = snd_soc_update_bits(codec, R_CNVRTR1, RM_CNVRTR1_DACMU,
+		RV_CNVRTR1_DACMU_DISABLE);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to unmute DAC (%d)\n",
+				ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static inline int adc_mute(struct snd_soc_codec *codec)
+{
+	int ret;
+
+	ret = snd_soc_update_bits(codec, R_CNVRTR0, RM_CNVRTR0_ADCMU,
+		RV_CNVRTR0_ADCMU_ENABLE);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to mute ADC (%d)\n",
+				ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static inline int adc_unmute(struct snd_soc_codec *codec)
+{
+	int ret;
+
+	ret = snd_soc_update_bits(codec, R_CNVRTR0, RM_CNVRTR0_ADCMU,
+		RV_CNVRTR0_ADCMU_DISABLE);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to unmute ADC (%d)\n",
+				ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int tscs42xx_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	int ret;
+
+	if (mute)
+		if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+			ret = dac_mute(codec);
+		else
+			ret = adc_mute(codec);
+	else
+		if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+			ret = dac_unmute(codec);
+		else
+			ret = adc_unmute(codec);
+
+	return ret;
+}
+
+static int tscs42xx_set_dai_fmt(struct snd_soc_dai *codec_dai,
+		unsigned int fmt)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	int ret;
+
+	/* Slave mode not supported since it needs always-on frame clock */
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		ret = snd_soc_update_bits(codec, R_AIC1, RM_AIC1_MS,
+				RV_AIC1_MS_MASTER);
+		if (ret < 0) {
+			dev_err(codec->dev,
+				"Failed to set codec DAI master (%d)\n", ret);
+			return ret;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		dev_err(codec->dev, "Unsupported format (%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
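+/*
+ * Only 32, 40 and 64 BCLKs per frame are accepted; a machine driver would
+ * typically select one of these with snd_soc_dai_set_bclk_ratio() before
+ * the stream is started.
+ */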
+static int tscs42xx_set_dai_bclk_ratio(struct snd_soc_dai *codec_dai,
+		unsigned int ratio)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+	unsigned int value;
+	int ret = 0;
+
+	switch (ratio) {
+	case 32:
+		value = RV_DACSR_DBCM_32;
+		break;
+	case 40:
+		value = RV_DACSR_DBCM_40;
+		break;
+	case 64:
+		value = RV_DACSR_DBCM_64;
+		break;
+	default:
+		dev_err(codec->dev, "Unsupported bclk ratio (%u)\n", ratio);
+		return -EINVAL;
+	}
+
+	ret = snd_soc_update_bits(codec, R_DACSR, RM_DACSR_DBCM, value);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set DAC BCLK ratio (%d)\n", ret);
+		return ret;
+	}
+	ret = snd_soc_update_bits(codec, R_ADCSR, RM_ADCSR_ABCM, value);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set ADC BCLK ratio (%d)\n", ret);
+		return ret;
+	}
+
+	mutex_lock(&tscs42xx->audio_params_lock);
+
+	tscs42xx->bclk_ratio = ratio;
+
+	mutex_unlock(&tscs42xx->audio_params_lock);
+
+	return 0;
+}
+
+static int tscs42xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+	int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	int ret;
+
+	switch (clk_id) {
+	case TSCS42XX_PLL_SRC_XTAL:
+	case TSCS42XX_PLL_SRC_MCLK1:
+		ret = snd_soc_write(codec, R_PLLREFSEL,
+				RV_PLLREFSEL_PLL1_REF_SEL_XTAL_MCLK1 |
+				RV_PLLREFSEL_PLL2_REF_SEL_XTAL_MCLK1);
+		if (ret < 0) {
+			dev_err(codec->dev,
+				"Failed to set pll reference input (%d)\n",
+				ret);
+			return ret;
+		}
+		break;
+	case TSCS42XX_PLL_SRC_MCLK2:
+		ret = snd_soc_write(codec, R_PLLREFSEL,
+				RV_PLLREFSEL_PLL1_REF_SEL_MCLK2 |
+				RV_PLLREFSEL_PLL2_REF_SEL_MCLK2);
+		if (ret < 0) {
+			dev_err(codec->dev,
+				"Failed to set PLL reference (%d)\n", ret);
+			return ret;
+		}
+		break;
+	default:
+		dev_err(codec->dev, "pll src is unsupported\n");
+		return -EINVAL;
+	}
+
+	ret = set_pll_ctl_from_input_freq(codec, freq);
+	if (ret < 0) {
+		dev_err(codec->dev,
+			"Failed to setup PLL input freq (%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct snd_soc_dai_ops tscs42xx_dai_ops = {
+	.hw_params	= tscs42xx_hw_params,
+	.mute_stream	= tscs42xx_mute_stream,
+	.set_fmt	= tscs42xx_set_dai_fmt,
+	.set_bclk_ratio = tscs42xx_set_dai_bclk_ratio,
+	.set_sysclk	= tscs42xx_set_dai_sysclk,
+};
+
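+/*
+ * The 16-bit device ID is assembled from R_DEVIDH/R_DEVIDL; 0x4A74 and
+ * 0x4A73 presumably correspond to the two parts listed in tscs42xx_i2c_id
+ * (tscs42A1 and tscs42A2), though the mapping is not spelled out here.
+ */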
+static int part_is_valid(struct tscs42xx *tscs42xx)
+{
+	int val;
+	int ret;
+	unsigned int reg;
+
+	ret = regmap_read(tscs42xx->regmap, R_DEVIDH, &reg);
+	if (ret < 0)
+		return ret;
+
+	val = reg << 8;
+	ret = regmap_read(tscs42xx->regmap, R_DEVIDL, &reg);
+	if (ret < 0)
+		return ret;
+
+	val |= reg;
+
+	switch (val) {
+	case 0x4A74:
+	case 0x4A73:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_tscs42xx = {
+	.component_driver = {
+		.dapm_widgets = tscs42xx_dapm_widgets,
+		.num_dapm_widgets = ARRAY_SIZE(tscs42xx_dapm_widgets),
+		.dapm_routes = tscs42xx_intercon,
+		.num_dapm_routes = ARRAY_SIZE(tscs42xx_intercon),
+		.controls =	tscs42xx_snd_controls,
+		.num_controls = ARRAY_SIZE(tscs42xx_snd_controls),
+	},
+};
+
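+/*
+ * Seed the coefficient shadow RAM: the listed addresses are the first
+ * coefficient of each filter block, and 0x40 in its top byte (the byte
+ * that ends up in R_DACCRWRH) looks like a unity b0 term in the part's
+ * fixed-point format, presumably leaving every block as a pass-through
+ * until real coefficients are loaded.
+ */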
+static inline void init_coeff_ram_cache(struct tscs42xx *tscs42xx)
+{
+	const u8 norm_addrs[] = { 0x00, 0x05, 0x0a, 0x0f, 0x14, 0x19, 0x1f,
+		0x20, 0x25, 0x2a, 0x2f, 0x34, 0x39, 0x3f, 0x40, 0x45, 0x4a,
+		0x4f, 0x54, 0x59, 0x5f, 0x60, 0x65, 0x6a, 0x6f, 0x74, 0x79,
+		0x7f, 0x80, 0x85, 0x8c, 0x91, 0x96, 0x97, 0x9c, 0xa3, 0xa8,
+		0xad, 0xaf, 0xb0, 0xb5, 0xba, 0xbf, 0xc4, 0xc9, };
+	u8 *coeff_ram = tscs42xx->coeff_ram;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(norm_addrs); i++)
+		coeff_ram[((norm_addrs[i] + 1) * COEFF_SIZE) - 1] = 0x40;
+}
+
+#define TSCS42XX_RATES SNDRV_PCM_RATE_8000_96000
+
+#define TSCS42XX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE \
+	| SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver tscs42xx_dai = {
+	.name = "tscs42xx-HiFi",
+	.playback = {
+		.stream_name = "HiFi Playback",
+		.channels_min = 2,
+		.channels_max = 2,
+		.rates = TSCS42XX_RATES,
+		.formats = TSCS42XX_FORMATS,},
+	.capture = {
+		.stream_name = "HiFi Capture",
+		.channels_min = 2,
+		.channels_max = 2,
+		.rates = TSCS42XX_RATES,
+		.formats = TSCS42XX_FORMATS,},
+	.ops = &tscs42xx_dai_ops,
+	.symmetric_rates = 1,
+	.symmetric_channels = 1,
+	.symmetric_samplebits = 1,
+};
+
+static const struct reg_sequence tscs42xx_patch[] = {
+	{ R_AIC2, RV_AIC2_BLRCM_DAC_BCLK_LRCLK_SHARED },
+};
+
+static int tscs42xx_i2c_probe(struct i2c_client *i2c,
+		const struct i2c_device_id *id)
+{
+	struct tscs42xx *tscs42xx;
+	int ret = 0;
+
+	tscs42xx = devm_kzalloc(&i2c->dev, sizeof(*tscs42xx), GFP_KERNEL);
+	if (!tscs42xx) {
+		ret = -ENOMEM;
+		dev_err(&i2c->dev,
+			"Failed to allocate memory for data (%d)\n", ret);
+		return ret;
+	}
+	i2c_set_clientdata(i2c, tscs42xx);
+	tscs42xx->dev = &i2c->dev;
+
+	tscs42xx->regmap = devm_regmap_init_i2c(i2c, &tscs42xx_regmap);
+	if (IS_ERR(tscs42xx->regmap)) {
+		ret = PTR_ERR(tscs42xx->regmap);
+		dev_err(tscs42xx->dev, "Failed to allocate regmap (%d)\n", ret);
+		return ret;
+	}
+
+	init_coeff_ram_cache(tscs42xx);
+
+	ret = part_is_valid(tscs42xx);
+	if (ret <= 0) {
+		dev_err(tscs42xx->dev, "No valid part (%d)\n", ret);
+		ret = -ENODEV;
+		return ret;
+	}
+
+	ret = regmap_write(tscs42xx->regmap, R_RESET, RV_RESET_ENABLE);
+	if (ret < 0) {
+		dev_err(tscs42xx->dev, "Failed to reset device (%d)\n", ret);
+		return ret;
+	}
+
+	ret = regmap_register_patch(tscs42xx->regmap, tscs42xx_patch,
+			ARRAY_SIZE(tscs42xx_patch));
+	if (ret < 0) {
+		dev_err(tscs42xx->dev, "Failed to apply patch (%d)\n", ret);
+		return ret;
+	}
+
+	mutex_init(&tscs42xx->audio_params_lock);
+	mutex_init(&tscs42xx->coeff_ram_lock);
+	mutex_init(&tscs42xx->pll_lock);
+
+	ret = snd_soc_register_codec(tscs42xx->dev, &soc_codec_dev_tscs42xx,
+			&tscs42xx_dai, 1);
+	if (ret) {
+		dev_err(tscs42xx->dev, "Failed to register codec (%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int tscs42xx_i2c_remove(struct i2c_client *client)
+{
+	snd_soc_unregister_codec(&client->dev);
+
+	return 0;
+}
+
+static const struct i2c_device_id tscs42xx_i2c_id[] = {
+	{ "tscs42A1", 0 },
+	{ "tscs42A2", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, tscs42xx_i2c_id);
+
+static const struct of_device_id tscs42xx_of_match[] = {
+	{ .compatible = "tempo,tscs42A1", },
+	{ .compatible = "tempo,tscs42A2", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, tscs42xx_of_match);
+
+static struct i2c_driver tscs42xx_i2c_driver = {
+	.driver = {
+		.name = "tscs42xx",
+		.owner = THIS_MODULE,
+		.of_match_table = tscs42xx_of_match,
+	},
+	.probe =    tscs42xx_i2c_probe,
+	.remove =   tscs42xx_i2c_remove,
+	.id_table = tscs42xx_i2c_id,
+};
+
+module_i2c_driver(tscs42xx_i2c_driver);
+
+MODULE_AUTHOR("Tempo Semiconductor <steven.eckhoff.opensource@gmail.com>");
+MODULE_DESCRIPTION("ASoC TSCS42xx driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tscs42xx.h b/sound/soc/codecs/tscs42xx.h
new file mode 100644
index 0000000000000000000000000000000000000000..d4a30bcbf64ba7f7195563d9b2264df37fadd0e2
--- /dev/null
+++ b/sound/soc/codecs/tscs42xx.h
@@ -0,0 +1,2693 @@
+// SPDX-License-Identifier: GPL-2.0
+// tscs42xx.h -- TSCS42xx ALSA SoC Audio driver
+// Copyright 2017 Tempo Semiconductor, Inc.
+// Author: Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
+
+#ifndef __WOOKIE_H__
+#define __WOOKIE_H__
+
+enum {
+	TSCS42XX_PLL_SRC_NONE,
+	TSCS42XX_PLL_SRC_XTAL,
+	TSCS42XX_PLL_SRC_MCLK1,
+	TSCS42XX_PLL_SRC_MCLK2,
+};
+
+#define R_HPVOLL        0x0
+#define R_HPVOLR        0x1
+#define R_SPKVOLL       0x2
+#define R_SPKVOLR       0x3
+#define R_DACVOLL       0x4
+#define R_DACVOLR       0x5
+#define R_ADCVOLL       0x6
+#define R_ADCVOLR       0x7
+#define R_INVOLL        0x8
+#define R_INVOLR        0x9
+#define R_INMODE        0x0B
+#define R_INSELL        0x0C
+#define R_INSELR        0x0D
+#define R_AIC1          0x13
+#define R_AIC2          0x14
+#define R_CNVRTR0       0x16
+#define R_ADCSR         0x17
+#define R_CNVRTR1       0x18
+#define R_DACSR         0x19
+#define R_PWRM1         0x1A
+#define R_PWRM2         0x1B
+#define R_CONFIG0       0x1F
+#define R_CONFIG1       0x20
+#define R_DMICCTL       0x24
+#define R_CLECTL        0x25
+#define R_MUGAIN        0x26
+#define R_COMPTH        0x27
+#define R_CMPRAT        0x28
+#define R_CATKTCL       0x29
+#define R_CATKTCH       0x2A
+#define R_CRELTCL       0x2B
+#define R_CRELTCH       0x2C
+#define R_LIMTH         0x2D
+#define R_LIMTGT        0x2E
+#define R_LATKTCL       0x2F
+#define R_LATKTCH       0x30
+#define R_LRELTCL       0x31
+#define R_LRELTCH       0x32
+#define R_EXPTH         0x33
+#define R_EXPRAT        0x34
+#define R_XATKTCL       0x35
+#define R_XATKTCH       0x36
+#define R_XRELTCL       0x37
+#define R_XRELTCH       0x38
+#define R_FXCTL         0x39
+#define R_DACCRWRL      0x3A
+#define R_DACCRWRM      0x3B
+#define R_DACCRWRH      0x3C
+#define R_DACCRRDL      0x3D
+#define R_DACCRRDM      0x3E
+#define R_DACCRRDH      0x3F
+#define R_DACCRADDR     0x40
+#define R_DCOFSEL       0x41
+#define R_PLLCTL9       0x4E
+#define R_PLLCTLA       0x4F
+#define R_PLLCTLB       0x50
+#define R_PLLCTLC       0x51
+#define R_PLLCTLD       0x52
+#define R_PLLCTLE       0x53
+#define R_PLLCTLF       0x54
+#define R_PLLCTL10      0x55
+#define R_PLLCTL11      0x56
+#define R_PLLCTL12      0x57
+#define R_PLLCTL1B      0x60
+#define R_PLLCTL1C      0x61
+#define R_TIMEBASE      0x77
+#define R_DEVIDL        0x7D
+#define R_DEVIDH        0x7E
+#define R_RESET         0x80
+#define R_DACCRSTAT     0x8A
+#define R_PLLCTL0       0x8E
+#define R_PLLREFSEL     0x8F
+#define R_DACMBCEN      0xC7
+#define R_DACMBCCTL     0xC8
+#define R_DACMBCMUG1    0xC9
+#define R_DACMBCTHR1    0xCA
+#define R_DACMBCRAT1    0xCB
+#define R_DACMBCATK1L   0xCC
+#define R_DACMBCATK1H   0xCD
+#define R_DACMBCREL1L   0xCE
+#define R_DACMBCREL1H   0xCF
+#define R_DACMBCMUG2    0xD0
+#define R_DACMBCTHR2    0xD1
+#define R_DACMBCRAT2    0xD2
+#define R_DACMBCATK2L   0xD3
+#define R_DACMBCATK2H   0xD4
+#define R_DACMBCREL2L   0xD5
+#define R_DACMBCREL2H   0xD6
+#define R_DACMBCMUG3    0xD7
+#define R_DACMBCTHR3    0xD8
+#define R_DACMBCRAT3    0xD9
+#define R_DACMBCATK3L   0xDA
+#define R_DACMBCATK3H   0xDB
+#define R_DACMBCREL3L   0xDC
+#define R_DACMBCREL3H   0xDD
+
+/* Helpers */
+#define RM(m, b) ((m)<<(b))
+#define RV(v, b) ((v)<<(b))
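+
+/*
+ * Example: RM(FM_AIC1_WL, FB_AIC1_WL) is 0x3 << 2 == 0x0C, the in-register
+ * mask of the word-length field, and RV(FV_AIC1_WL_24, FB_AIC1_WL) is
+ * 0x2 << 2 == 0x08, the shifted value selecting 24-bit samples.
+ */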
+
+/****************************
+ *      R_HPVOLL (0x0)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_HPVOLL                            0
+
+/* Field Masks */
+#define FM_HPVOLL                            0X7F
+
+/* Field Values */
+#define FV_HPVOLL_P6DB                       0x7F
+#define FV_HPVOLL_N88PT5DB                   0x1
+#define FV_HPVOLL_MUTE                       0x0
+
+/* Register Masks */
+#define RM_HPVOLL                            RM(FM_HPVOLL, FB_HPVOLL)
+
+/* Register Values */
+#define RV_HPVOLL_P6DB                       RV(FV_HPVOLL_P6DB, FB_HPVOLL)
+#define RV_HPVOLL_N88PT5DB                   RV(FV_HPVOLL_N88PT5DB, FB_HPVOLL)
+#define RV_HPVOLL_MUTE                       RV(FV_HPVOLL_MUTE, FB_HPVOLL)
+
+/****************************
+ *      R_HPVOLR (0x1)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_HPVOLR                            0
+
+/* Field Masks */
+#define FM_HPVOLR                            0X7F
+
+/* Field Values */
+#define FV_HPVOLR_P6DB                       0x7F
+#define FV_HPVOLR_N88PT5DB                   0x1
+#define FV_HPVOLR_MUTE                       0x0
+
+/* Register Masks */
+#define RM_HPVOLR                            RM(FM_HPVOLR, FB_HPVOLR)
+
+/* Register Values */
+#define RV_HPVOLR_P6DB                       RV(FV_HPVOLR_P6DB, FB_HPVOLR)
+#define RV_HPVOLR_N88PT5DB                   RV(FV_HPVOLR_N88PT5DB, FB_HPVOLR)
+#define RV_HPVOLR_MUTE                       RV(FV_HPVOLR_MUTE, FB_HPVOLR)
+
+/*****************************
+ *      R_SPKVOLL (0x2)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_SPKVOLL                           0
+
+/* Field Masks */
+#define FM_SPKVOLL                           0X7F
+
+/* Field Values */
+#define FV_SPKVOLL_P12DB                     0x7F
+#define FV_SPKVOLL_N77PT25DB                 0x8
+#define FV_SPKVOLL_MUTE                      0x0
+
+/* Register Masks */
+#define RM_SPKVOLL                           RM(FM_SPKVOLL, FB_SPKVOLL)
+
+/* Register Values */
+#define RV_SPKVOLL_P12DB                     RV(FV_SPKVOLL_P12DB, FB_SPKVOLL)
+#define RV_SPKVOLL_N77PT25DB \
+	 RV(FV_SPKVOLL_N77PT25DB, FB_SPKVOLL)
+
+#define RV_SPKVOLL_MUTE                      RV(FV_SPKVOLL_MUTE, FB_SPKVOLL)
+
+/*****************************
+ *      R_SPKVOLR (0x3)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_SPKVOLR                           0
+
+/* Field Masks */
+#define FM_SPKVOLR                           0X7F
+
+/* Field Values */
+#define FV_SPKVOLR_P12DB                     0x7F
+#define FV_SPKVOLR_N77PT25DB                 0x8
+#define FV_SPKVOLR_MUTE                      0x0
+
+/* Register Masks */
+#define RM_SPKVOLR                           RM(FM_SPKVOLR, FB_SPKVOLR)
+
+/* Register Values */
+#define RV_SPKVOLR_P12DB                     RV(FV_SPKVOLR_P12DB, FB_SPKVOLR)
+#define RV_SPKVOLR_N77PT25DB \
+	 RV(FV_SPKVOLR_N77PT25DB, FB_SPKVOLR)
+
+#define RV_SPKVOLR_MUTE                      RV(FV_SPKVOLR_MUTE, FB_SPKVOLR)
+
+/*****************************
+ *      R_DACVOLL (0x4)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_DACVOLL                           0
+
+/* Field Masks */
+#define FM_DACVOLL                           0XFF
+
+/* Field Values */
+#define FV_DACVOLL_0DB                       0xFF
+#define FV_DACVOLL_N95PT625DB                0x1
+#define FV_DACVOLL_MUTE                      0x0
+
+/* Register Masks */
+#define RM_DACVOLL                           RM(FM_DACVOLL, FB_DACVOLL)
+
+/* Register Values */
+#define RV_DACVOLL_0DB                       RV(FV_DACVOLL_0DB, FB_DACVOLL)
+#define RV_DACVOLL_N95PT625DB \
+	 RV(FV_DACVOLL_N95PT625DB, FB_DACVOLL)
+
+#define RV_DACVOLL_MUTE                      RV(FV_DACVOLL_MUTE, FB_DACVOLL)
+
+/*****************************
+ *      R_DACVOLR (0x5)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_DACVOLR                           0
+
+/* Field Masks */
+#define FM_DACVOLR                           0XFF
+
+/* Field Values */
+#define FV_DACVOLR_0DB                       0xFF
+#define FV_DACVOLR_N95PT625DB                0x1
+#define FV_DACVOLR_MUTE                      0x0
+
+/* Register Masks */
+#define RM_DACVOLR                           RM(FM_DACVOLR, FB_DACVOLR)
+
+/* Register Values */
+#define RV_DACVOLR_0DB                       RV(FV_DACVOLR_0DB, FB_DACVOLR)
+#define RV_DACVOLR_N95PT625DB \
+	 RV(FV_DACVOLR_N95PT625DB, FB_DACVOLR)
+
+#define RV_DACVOLR_MUTE                      RV(FV_DACVOLR_MUTE, FB_DACVOLR)
+
+/*****************************
+ *      R_ADCVOLL (0x6)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_ADCVOLL                           0
+
+/* Field Masks */
+#define FM_ADCVOLL                           0XFF
+
+/* Field Values */
+#define FV_ADCVOLL_P24DB                     0xFF
+#define FV_ADCVOLL_N71PT25DB                 0x1
+#define FV_ADCVOLL_MUTE                      0x0
+
+/* Register Masks */
+#define RM_ADCVOLL                           RM(FM_ADCVOLL, FB_ADCVOLL)
+
+/* Register Values */
+#define RV_ADCVOLL_P24DB                     RV(FV_ADCVOLL_P24DB, FB_ADCVOLL)
+#define RV_ADCVOLL_N71PT25DB \
+	 RV(FV_ADCVOLL_N71PT25DB, FB_ADCVOLL)
+
+#define RV_ADCVOLL_MUTE                      RV(FV_ADCVOLL_MUTE, FB_ADCVOLL)
+
+/*****************************
+ *      R_ADCVOLR (0x7)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_ADCVOLR                           0
+
+/* Field Masks */
+#define FM_ADCVOLR                           0XFF
+
+/* Field Values */
+#define FV_ADCVOLR_P24DB                     0xFF
+#define FV_ADCVOLR_N71PT25DB                 0x1
+#define FV_ADCVOLR_MUTE                      0x0
+
+/* Register Masks */
+#define RM_ADCVOLR                           RM(FM_ADCVOLR, FB_ADCVOLR)
+
+/* Register Values */
+#define RV_ADCVOLR_P24DB                     RV(FV_ADCVOLR_P24DB, FB_ADCVOLR)
+#define RV_ADCVOLR_N71PT25DB \
+	 RV(FV_ADCVOLR_N71PT25DB, FB_ADCVOLR)
+
+#define RV_ADCVOLR_MUTE                      RV(FV_ADCVOLR_MUTE, FB_ADCVOLR)
+
+/****************************
+ *      R_INVOLL (0x8)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_INVOLL_INMUTEL                    7
+#define FB_INVOLL_IZCL                       6
+#define FB_INVOLL                            0
+
+/* Field Masks */
+#define FM_INVOLL_INMUTEL                    0X1
+#define FM_INVOLL_IZCL                       0X1
+#define FM_INVOLL                            0X3F
+
+/* Field Values */
+#define FV_INVOLL_INMUTEL_ENABLE             0x1
+#define FV_INVOLL_INMUTEL_DISABLE            0x0
+#define FV_INVOLL_IZCL_ENABLE                0x1
+#define FV_INVOLL_IZCL_DISABLE               0x0
+#define FV_INVOLL_P30DB                      0x3F
+#define FV_INVOLL_N17PT25DB                  0x0
+
+/* Register Masks */
+#define RM_INVOLL_INMUTEL \
+	 RM(FM_INVOLL_INMUTEL, FB_INVOLL_INMUTEL)
+
+#define RM_INVOLL_IZCL                       RM(FM_INVOLL_IZCL, FB_INVOLL_IZCL)
+#define RM_INVOLL                            RM(FM_INVOLL, FB_INVOLL)
+
+/* Register Values */
+#define RV_INVOLL_INMUTEL_ENABLE \
+	 RV(FV_INVOLL_INMUTEL_ENABLE, FB_INVOLL_INMUTEL)
+
+#define RV_INVOLL_INMUTEL_DISABLE \
+	 RV(FV_INVOLL_INMUTEL_DISABLE, FB_INVOLL_INMUTEL)
+
+#define RV_INVOLL_IZCL_ENABLE \
+	 RV(FV_INVOLL_IZCL_ENABLE, FB_INVOLL_IZCL)
+
+#define RV_INVOLL_IZCL_DISABLE \
+	 RV(FV_INVOLL_IZCL_DISABLE, FB_INVOLL_IZCL)
+
+#define RV_INVOLL_P30DB                      RV(FV_INVOLL_P30DB, FB_INVOLL)
+#define RV_INVOLL_N17PT25DB                  RV(FV_INVOLL_N17PT25DB, FB_INVOLL)
+
+/****************************
+ *      R_INVOLR (0x9)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_INVOLR_INMUTER                    7
+#define FB_INVOLR_IZCR                       6
+#define FB_INVOLR                            0
+
+/* Field Masks */
+#define FM_INVOLR_INMUTER                    0X1
+#define FM_INVOLR_IZCR                       0X1
+#define FM_INVOLR                            0X3F
+
+/* Field Values */
+#define FV_INVOLR_INMUTER_ENABLE             0x1
+#define FV_INVOLR_INMUTER_DISABLE            0x0
+#define FV_INVOLR_IZCR_ENABLE                0x1
+#define FV_INVOLR_IZCR_DISABLE               0x0
+#define FV_INVOLR_P30DB                      0x3F
+#define FV_INVOLR_N17PT25DB                  0x0
+
+/* Register Masks */
+#define RM_INVOLR_INMUTER \
+	 RM(FM_INVOLR_INMUTER, FB_INVOLR_INMUTER)
+
+#define RM_INVOLR_IZCR                       RM(FM_INVOLR_IZCR, FB_INVOLR_IZCR)
+#define RM_INVOLR                            RM(FM_INVOLR, FB_INVOLR)
+
+/* Register Values */
+#define RV_INVOLR_INMUTER_ENABLE \
+	 RV(FV_INVOLR_INMUTER_ENABLE, FB_INVOLR_INMUTER)
+
+#define RV_INVOLR_INMUTER_DISABLE \
+	 RV(FV_INVOLR_INMUTER_DISABLE, FB_INVOLR_INMUTER)
+
+#define RV_INVOLR_IZCR_ENABLE \
+	 RV(FV_INVOLR_IZCR_ENABLE, FB_INVOLR_IZCR)
+
+#define RV_INVOLR_IZCR_DISABLE \
+	 RV(FV_INVOLR_IZCR_DISABLE, FB_INVOLR_IZCR)
+
+#define RV_INVOLR_P30DB                      RV(FV_INVOLR_P30DB, FB_INVOLR)
+#define RV_INVOLR_N17PT25DB                  RV(FV_INVOLR_N17PT25DB, FB_INVOLR)
+
+/*****************************
+ *      R_INMODE (0x0B)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_INMODE_DS                         0
+
+/* Field Masks */
+#define FM_INMODE_DS                         0X1
+
+/* Field Values */
+#define FV_INMODE_DS_LRIN1                   0x0
+#define FV_INMODE_DS_LRIN2                   0x1
+
+/* Register Masks */
+#define RM_INMODE_DS                         RM(FM_INMODE_DS, FB_INMODE_DS)
+
+/* Register Values */
+#define RV_INMODE_DS_LRIN1 \
+	 RV(FV_INMODE_DS_LRIN1, FB_INMODE_DS)
+
+#define RV_INMODE_DS_LRIN2 \
+	 RV(FV_INMODE_DS_LRIN2, FB_INMODE_DS)
+
+
+/*****************************
+ *      R_INSELL (0x0C)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_INSELL                            6
+#define FB_INSELL_MICBSTL                    4
+
+/* Field Masks */
+#define FM_INSELL                            0X3
+#define FM_INSELL_MICBSTL                    0X3
+
+/* Field Values */
+#define FV_INSELL_IN1                        0x0
+#define FV_INSELL_IN2                        0x1
+#define FV_INSELL_IN3                        0x2
+#define FV_INSELL_D2S                        0x3
+#define FV_INSELL_MICBSTL_OFF                0x0
+#define FV_INSELL_MICBSTL_10DB               0x1
+#define FV_INSELL_MICBSTL_20DB               0x2
+#define FV_INSELL_MICBSTL_30DB               0x3
+
+/* Register Masks */
+#define RM_INSELL                            RM(FM_INSELL, FB_INSELL)
+#define RM_INSELL_MICBSTL \
+	 RM(FM_INSELL_MICBSTL, FB_INSELL_MICBSTL)
+
+
+/* Register Values */
+#define RV_INSELL_IN1                        RV(FV_INSELL_IN1, FB_INSELL)
+#define RV_INSELL_IN2                        RV(FV_INSELL_IN2, FB_INSELL)
+#define RV_INSELL_IN3                        RV(FV_INSELL_IN3, FB_INSELL)
+#define RV_INSELL_D2S                        RV(FV_INSELL_D2S, FB_INSELL)
+#define RV_INSELL_MICBSTL_OFF \
+	 RV(FV_INSELL_MICBSTL_OFF, FB_INSELL_MICBSTL)
+
+#define RV_INSELL_MICBSTL_10DB \
+	 RV(FV_INSELL_MICBSTL_10DB, FB_INSELL_MICBSTL)
+
+#define RV_INSELL_MICBSTL_20DB \
+	 RV(FV_INSELL_MICBSTL_20DB, FB_INSELL_MICBSTL)
+
+#define RV_INSELL_MICBSTL_30DB \
+	 RV(FV_INSELL_MICBSTL_30DB, FB_INSELL_MICBSTL)
+
+
+/*****************************
+ *      R_INSELR (0x0D)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_INSELR                            6
+#define FB_INSELR_MICBSTR                    4
+
+/* Field Masks */
+#define FM_INSELR                            0X3
+#define FM_INSELR_MICBSTR                    0X3
+
+/* Field Values */
+#define FV_INSELR_IN1                        0x0
+#define FV_INSELR_IN2                        0x1
+#define FV_INSELR_IN3                        0x2
+#define FV_INSELR_D2S                        0x3
+#define FV_INSELR_MICBSTR_OFF                0x0
+#define FV_INSELR_MICBSTR_10DB               0x1
+#define FV_INSELR_MICBSTR_20DB               0x2
+#define FV_INSELR_MICBSTR_30DB               0x3
+
+/* Register Masks */
+#define RM_INSELR                            RM(FM_INSELR, FB_INSELR)
+#define RM_INSELR_MICBSTR \
+	 RM(FM_INSELR_MICBSTR, FB_INSELR_MICBSTR)
+
+
+/* Register Values */
+#define RV_INSELR_IN1                        RV(FV_INSELR_IN1, FB_INSELR)
+#define RV_INSELR_IN2                        RV(FV_INSELR_IN2, FB_INSELR)
+#define RV_INSELR_IN3                        RV(FV_INSELR_IN3, FB_INSELR)
+#define RV_INSELR_D2S                        RV(FV_INSELR_D2S, FB_INSELR)
+#define RV_INSELR_MICBSTR_OFF \
+	 RV(FV_INSELR_MICBSTR_OFF, FB_INSELR_MICBSTR)
+
+#define RV_INSELR_MICBSTR_10DB \
+	 RV(FV_INSELR_MICBSTR_10DB, FB_INSELR_MICBSTR)
+
+#define RV_INSELR_MICBSTR_20DB \
+	 RV(FV_INSELR_MICBSTR_20DB, FB_INSELR_MICBSTR)
+
+#define RV_INSELR_MICBSTR_30DB \
+	 RV(FV_INSELR_MICBSTR_30DB, FB_INSELR_MICBSTR)
+
+
+/***************************
+ *      R_AIC1 (0x13)      *
+ ***************************/
+
+/* Field Offsets */
+#define FB_AIC1_BCLKINV                      6
+#define FB_AIC1_MS                           5
+#define FB_AIC1_LRP                          4
+#define FB_AIC1_WL                           2
+#define FB_AIC1_FORMAT                       0
+
+/* Field Masks */
+#define FM_AIC1_BCLKINV                      0X1
+#define FM_AIC1_MS                           0X1
+#define FM_AIC1_LRP                          0X1
+#define FM_AIC1_WL                           0X3
+#define FM_AIC1_FORMAT                       0X3
+
+/* Field Values */
+#define FV_AIC1_BCLKINV_ENABLE               0x1
+#define FV_AIC1_BCLKINV_DISABLE              0x0
+#define FV_AIC1_MS_MASTER                    0x1
+#define FV_AIC1_MS_SLAVE                     0x0
+#define FV_AIC1_LRP_INVERT                   0x1
+#define FV_AIC1_LRP_NORMAL                   0x0
+#define FV_AIC1_WL_16                        0x0
+#define FV_AIC1_WL_20                        0x1
+#define FV_AIC1_WL_24                        0x2
+#define FV_AIC1_WL_32                        0x3
+#define FV_AIC1_FORMAT_RIGHT                 0x0
+#define FV_AIC1_FORMAT_LEFT                  0x1
+#define FV_AIC1_FORMAT_I2S                   0x2
+
+/* Register Masks */
+#define RM_AIC1_BCLKINV \
+	 RM(FM_AIC1_BCLKINV, FB_AIC1_BCLKINV)
+
+#define RM_AIC1_MS                           RM(FM_AIC1_MS, FB_AIC1_MS)
+#define RM_AIC1_LRP                          RM(FM_AIC1_LRP, FB_AIC1_LRP)
+#define RM_AIC1_WL                           RM(FM_AIC1_WL, FB_AIC1_WL)
+#define RM_AIC1_FORMAT                       RM(FM_AIC1_FORMAT, FB_AIC1_FORMAT)
+
+/* Register Values */
+#define RV_AIC1_BCLKINV_ENABLE \
+	 RV(FV_AIC1_BCLKINV_ENABLE, FB_AIC1_BCLKINV)
+
+#define RV_AIC1_BCLKINV_DISABLE \
+	 RV(FV_AIC1_BCLKINV_DISABLE, FB_AIC1_BCLKINV)
+
+#define RV_AIC1_MS_MASTER                    RV(FV_AIC1_MS_MASTER, FB_AIC1_MS)
+#define RV_AIC1_MS_SLAVE                     RV(FV_AIC1_MS_SLAVE, FB_AIC1_MS)
+#define RV_AIC1_LRP_INVERT \
+	 RV(FV_AIC1_LRP_INVERT, FB_AIC1_LRP)
+
+#define RV_AIC1_LRP_NORMAL \
+	 RV(FV_AIC1_LRP_NORMAL, FB_AIC1_LRP)
+
+#define RV_AIC1_WL_16                        RV(FV_AIC1_WL_16, FB_AIC1_WL)
+#define RV_AIC1_WL_20                        RV(FV_AIC1_WL_20, FB_AIC1_WL)
+#define RV_AIC1_WL_24                        RV(FV_AIC1_WL_24, FB_AIC1_WL)
+#define RV_AIC1_WL_32                        RV(FV_AIC1_WL_32, FB_AIC1_WL)
+#define RV_AIC1_FORMAT_RIGHT \
+	 RV(FV_AIC1_FORMAT_RIGHT, FB_AIC1_FORMAT)
+
+#define RV_AIC1_FORMAT_LEFT \
+	 RV(FV_AIC1_FORMAT_LEFT, FB_AIC1_FORMAT)
+
+#define RV_AIC1_FORMAT_I2S \
+	 RV(FV_AIC1_FORMAT_I2S, FB_AIC1_FORMAT)
+
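+/*
+ * Illustrative use of the R_AIC1 definitions (not part of the register map
+ * itself): the RV_* values above are already shifted into their fields, so a
+ * hw_params() handler could OR them together and apply them with the matching
+ * RM_* masks, e.g. through the codec's regmap. R_AIC1 is assumed to be the
+ * register address macro defined earlier in this header.
+ *
+ *	unsigned int val = RV_AIC1_MS_SLAVE | RV_AIC1_LRP_NORMAL |
+ *			   RV_AIC1_WL_32 | RV_AIC1_FORMAT_I2S;
+ *	unsigned int msk = RM_AIC1_MS | RM_AIC1_LRP |
+ *			   RM_AIC1_WL | RM_AIC1_FORMAT;
+ *
+ *	regmap_update_bits(regmap, R_AIC1, msk, val);
+ */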
+
+/***************************
+ *      R_AIC2 (0x14)      *
+ ***************************/
+
+/* Field Offsets */
+#define FB_AIC2_DACDSEL                      6
+#define FB_AIC2_ADCDSEL                      4
+#define FB_AIC2_TRI                          3
+#define FB_AIC2_BLRCM                        0
+
+/* Field Masks */
+#define FM_AIC2_DACDSEL                      0X3
+#define FM_AIC2_ADCDSEL                      0X3
+#define FM_AIC2_TRI                          0X1
+#define FM_AIC2_BLRCM                        0X7
+
+/* Field Values */
+#define FV_AIC2_BLRCM_DAC_BCLK_LRCLK_SHARED  0x3
+
+/* Register Masks */
+#define RM_AIC2_DACDSEL \
+	 RM(FM_AIC2_DACDSEL, FB_AIC2_DACDSEL)
+
+#define RM_AIC2_ADCDSEL \
+	 RM(FM_AIC2_ADCDSEL, FB_AIC2_ADCDSEL)
+
+#define RM_AIC2_TRI                          RM(FM_AIC2_TRI, FB_AIC2_TRI)
+#define RM_AIC2_BLRCM                        RM(FM_AIC2_BLRCM, FB_AIC2_BLRCM)
+
+/* Register Values */
+#define RV_AIC2_BLRCM_DAC_BCLK_LRCLK_SHARED \
+	 RV(FV_AIC2_BLRCM_DAC_BCLK_LRCLK_SHARED, FB_AIC2_BLRCM)
+
+
+/******************************
+ *      R_CNVRTR0 (0x16)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CNVRTR0_ADCPOLR                   7
+#define FB_CNVRTR0_ADCPOLL                   6
+#define FB_CNVRTR0_AMONOMIX                  4
+#define FB_CNVRTR0_ADCMU                     3
+#define FB_CNVRTR0_HPOR                      2
+#define FB_CNVRTR0_ADCHPDR                   1
+#define FB_CNVRTR0_ADCHPDL                   0
+
+/* Field Masks */
+#define FM_CNVRTR0_ADCPOLR                   0X1
+#define FM_CNVRTR0_ADCPOLL                   0X1
+#define FM_CNVRTR0_AMONOMIX                  0X3
+#define FM_CNVRTR0_ADCMU                     0X1
+#define FM_CNVRTR0_HPOR                      0X1
+#define FM_CNVRTR0_ADCHPDR                   0X1
+#define FM_CNVRTR0_ADCHPDL                   0X1
+
+/* Field Values */
+#define FV_CNVRTR0_ADCPOLR_INVERT            0x1
+#define FV_CNVRTR0_ADCPOLR_NORMAL            0x0
+#define FV_CNVRTR0_ADCPOLL_INVERT            0x1
+#define FV_CNVRTR0_ADCPOLL_NORMAL            0x0
+#define FV_CNVRTR0_ADCMU_ENABLE              0x1
+#define FV_CNVRTR0_ADCMU_DISABLE             0x0
+#define FV_CNVRTR0_ADCHPDR_ENABLE            0x1
+#define FV_CNVRTR0_ADCHPDR_DISABLE           0x0
+#define FV_CNVRTR0_ADCHPDL_ENABLE            0x1
+#define FV_CNVRTR0_ADCHPDL_DISABLE           0x0
+
+/* Register Masks */
+#define RM_CNVRTR0_ADCPOLR \
+	 RM(FM_CNVRTR0_ADCPOLR, FB_CNVRTR0_ADCPOLR)
+
+#define RM_CNVRTR0_ADCPOLL \
+	 RM(FM_CNVRTR0_ADCPOLL, FB_CNVRTR0_ADCPOLL)
+
+#define RM_CNVRTR0_AMONOMIX \
+	 RM(FM_CNVRTR0_AMONOMIX, FB_CNVRTR0_AMONOMIX)
+
+#define RM_CNVRTR0_ADCMU \
+	 RM(FM_CNVRTR0_ADCMU, FB_CNVRTR0_ADCMU)
+
+#define RM_CNVRTR0_HPOR \
+	 RM(FM_CNVRTR0_HPOR, FB_CNVRTR0_HPOR)
+
+#define RM_CNVRTR0_ADCHPDR \
+	 RM(FM_CNVRTR0_ADCHPDR, FB_CNVRTR0_ADCHPDR)
+
+#define RM_CNVRTR0_ADCHPDL \
+	 RM(FM_CNVRTR0_ADCHPDL, FB_CNVRTR0_ADCHPDL)
+
+
+/* Register Values */
+#define RV_CNVRTR0_ADCPOLR_INVERT \
+	 RV(FV_CNVRTR0_ADCPOLR_INVERT, FB_CNVRTR0_ADCPOLR)
+
+#define RV_CNVRTR0_ADCPOLR_NORMAL \
+	 RV(FV_CNVRTR0_ADCPOLR_NORMAL, FB_CNVRTR0_ADCPOLR)
+
+#define RV_CNVRTR0_ADCPOLL_INVERT \
+	 RV(FV_CNVRTR0_ADCPOLL_INVERT, FB_CNVRTR0_ADCPOLL)
+
+#define RV_CNVRTR0_ADCPOLL_NORMAL \
+	 RV(FV_CNVRTR0_ADCPOLL_NORMAL, FB_CNVRTR0_ADCPOLL)
+
+#define RV_CNVRTR0_ADCMU_ENABLE \
+	 RV(FV_CNVRTR0_ADCMU_ENABLE, FB_CNVRTR0_ADCMU)
+
+#define RV_CNVRTR0_ADCMU_DISABLE \
+	 RV(FV_CNVRTR0_ADCMU_DISABLE, FB_CNVRTR0_ADCMU)
+
+#define RV_CNVRTR0_ADCHPDR_ENABLE \
+	 RV(FV_CNVRTR0_ADCHPDR_ENABLE, FB_CNVRTR0_ADCHPDR)
+
+#define RV_CNVRTR0_ADCHPDR_DISABLE \
+	 RV(FV_CNVRTR0_ADCHPDR_DISABLE, FB_CNVRTR0_ADCHPDR)
+
+#define RV_CNVRTR0_ADCHPDL_ENABLE \
+	 RV(FV_CNVRTR0_ADCHPDL_ENABLE, FB_CNVRTR0_ADCHPDL)
+
+#define RV_CNVRTR0_ADCHPDL_DISABLE \
+	 RV(FV_CNVRTR0_ADCHPDL_DISABLE, FB_CNVRTR0_ADCHPDL)
+
+
+/****************************
+ *      R_ADCSR (0x17)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_ADCSR_ABCM                        6
+#define FB_ADCSR_ABR                         3
+#define FB_ADCSR_ABM                         0
+
+/* Field Masks */
+#define FM_ADCSR_ABCM                        0X3
+#define FM_ADCSR_ABR                         0X3
+#define FM_ADCSR_ABM                         0X7
+
+/* Field Values */
+#define FV_ADCSR_ABCM_AUTO                   0x0
+#define FV_ADCSR_ABCM_32                     0x1
+#define FV_ADCSR_ABCM_40                     0x2
+#define FV_ADCSR_ABCM_64                     0x3
+#define FV_ADCSR_ABR_32                      0x0
+#define FV_ADCSR_ABR_44_1                    0x1
+#define FV_ADCSR_ABR_48                      0x2
+#define FV_ADCSR_ABM_PT25                    0x0
+#define FV_ADCSR_ABM_PT5                     0x1
+#define FV_ADCSR_ABM_1                       0x2
+#define FV_ADCSR_ABM_2                       0x3
+
+/* Register Masks */
+#define RM_ADCSR_ABCM                        RM(FM_ADCSR_ABCM, FB_ADCSR_ABCM)
+#define RM_ADCSR_ABR                         RM(FM_ADCSR_ABR, FB_ADCSR_ABR)
+#define RM_ADCSR_ABM                         RM(FM_ADCSR_ABM, FB_ADCSR_ABM)
+
+/* Register Values */
+#define RV_ADCSR_ABCM_AUTO \
+	 RV(FV_ADCSR_ABCM_AUTO, FB_ADCSR_ABCM)
+
+#define RV_ADCSR_ABCM_32 \
+	 RV(FV_ADCSR_ABCM_32, FB_ADCSR_ABCM)
+
+#define RV_ADCSR_ABCM_40 \
+	 RV(FV_ADCSR_ABCM_40, FB_ADCSR_ABCM)
+
+#define RV_ADCSR_ABCM_64 \
+	 RV(FV_ADCSR_ABCM_64, FB_ADCSR_ABCM)
+
+#define RV_ADCSR_ABR_32                      RV(FV_ADCSR_ABR_32, FB_ADCSR_ABR)
+#define RV_ADCSR_ABR_44_1 \
+	 RV(FV_ADCSR_ABR_44_1, FB_ADCSR_ABR)
+
+#define RV_ADCSR_ABR_48                      RV(FV_ADCSR_ABR_48, FB_ADCSR_ABR)
+#define RV_ADCSR_ABM_PT25 \
+	 RV(FV_ADCSR_ABM_PT25, FB_ADCSR_ABM)
+
+#define RV_ADCSR_ABM_PT5                     RV(FV_ADCSR_ABM_PT5, FB_ADCSR_ABM)
+#define RV_ADCSR_ABM_1                       RV(FV_ADCSR_ABM_1, FB_ADCSR_ABM)
+#define RV_ADCSR_ABM_2                       RV(FV_ADCSR_ABM_2, FB_ADCSR_ABM)
+
+/******************************
+ *      R_CNVRTR1 (0x18)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CNVRTR1_DACPOLR                   7
+#define FB_CNVRTR1_DACPOLL                   6
+#define FB_CNVRTR1_DMONOMIX                  4
+#define FB_CNVRTR1_DACMU                     3
+#define FB_CNVRTR1_DEEMPH                    2
+#define FB_CNVRTR1_DACDITH                   0
+
+/* Field Masks */
+#define FM_CNVRTR1_DACPOLR                   0X1
+#define FM_CNVRTR1_DACPOLL                   0X1
+#define FM_CNVRTR1_DMONOMIX                  0X3
+#define FM_CNVRTR1_DACMU                     0X1
+#define FM_CNVRTR1_DEEMPH                    0X1
+#define FM_CNVRTR1_DACDITH                   0X3
+
+/* Field Values */
+#define FV_CNVRTR1_DACPOLR_INVERT            0x1
+#define FV_CNVRTR1_DACPOLR_NORMAL            0x0
+#define FV_CNVRTR1_DACPOLL_INVERT            0x1
+#define FV_CNVRTR1_DACPOLL_NORMAL            0x0
+#define FV_CNVRTR1_DMONOMIX_ENABLE           0x1
+#define FV_CNVRTR1_DMONOMIX_DISABLE          0x0
+#define FV_CNVRTR1_DACMU_ENABLE              0x1
+#define FV_CNVRTR1_DACMU_DISABLE             0x0
+
+/* Register Masks */
+#define RM_CNVRTR1_DACPOLR \
+	 RM(FM_CNVRTR1_DACPOLR, FB_CNVRTR1_DACPOLR)
+
+#define RM_CNVRTR1_DACPOLL \
+	 RM(FM_CNVRTR1_DACPOLL, FB_CNVRTR1_DACPOLL)
+
+#define RM_CNVRTR1_DMONOMIX \
+	 RM(FM_CNVRTR1_DMONOMIX, FB_CNVRTR1_DMONOMIX)
+
+#define RM_CNVRTR1_DACMU \
+	 RM(FM_CNVRTR1_DACMU, FB_CNVRTR1_DACMU)
+
+#define RM_CNVRTR1_DEEMPH \
+	 RM(FM_CNVRTR1_DEEMPH, FB_CNVRTR1_DEEMPH)
+
+#define RM_CNVRTR1_DACDITH \
+	 RM(FM_CNVRTR1_DACDITH, FB_CNVRTR1_DACDITH)
+
+
+/* Register Values */
+#define RV_CNVRTR1_DACPOLR_INVERT \
+	 RV(FV_CNVRTR1_DACPOLR_INVERT, FB_CNVRTR1_DACPOLR)
+
+#define RV_CNVRTR1_DACPOLR_NORMAL \
+	 RV(FV_CNVRTR1_DACPOLR_NORMAL, FB_CNVRTR1_DACPOLR)
+
+#define RV_CNVRTR1_DACPOLL_INVERT \
+	 RV(FV_CNVRTR1_DACPOLL_INVERT, FB_CNVRTR1_DACPOLL)
+
+#define RV_CNVRTR1_DACPOLL_NORMAL \
+	 RV(FV_CNVRTR1_DACPOLL_NORMAL, FB_CNVRTR1_DACPOLL)
+
+#define RV_CNVRTR1_DMONOMIX_ENABLE \
+	 RV(FV_CNVRTR1_DMONOMIX_ENABLE, FB_CNVRTR1_DMONOMIX)
+
+#define RV_CNVRTR1_DMONOMIX_DISABLE \
+	 RV(FV_CNVRTR1_DMONOMIX_DISABLE, FB_CNVRTR1_DMONOMIX)
+
+#define RV_CNVRTR1_DACMU_ENABLE \
+	 RV(FV_CNVRTR1_DACMU_ENABLE, FB_CNVRTR1_DACMU)
+
+#define RV_CNVRTR1_DACMU_DISABLE \
+	 RV(FV_CNVRTR1_DACMU_DISABLE, FB_CNVRTR1_DACMU)
+
+
+/****************************
+ *      R_DACSR (0x19)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_DACSR_DBCM                        6
+#define FB_DACSR_DBR                         3
+#define FB_DACSR_DBM                         0
+
+/* Field Masks */
+#define FM_DACSR_DBCM                        0X3
+#define FM_DACSR_DBR                         0X3
+#define FM_DACSR_DBM                         0X7
+
+/* Field Values */
+#define FV_DACSR_DBCM_AUTO                   0x0
+#define FV_DACSR_DBCM_32                     0x1
+#define FV_DACSR_DBCM_40                     0x2
+#define FV_DACSR_DBCM_64                     0x3
+#define FV_DACSR_DBR_32                      0x0
+#define FV_DACSR_DBR_44_1                    0x1
+#define FV_DACSR_DBR_48                      0x2
+#define FV_DACSR_DBM_PT25                    0x0
+#define FV_DACSR_DBM_PT5                     0x1
+#define FV_DACSR_DBM_1                       0x2
+#define FV_DACSR_DBM_2                       0x3
+
+/* Register Masks */
+#define RM_DACSR_DBCM                        RM(FM_DACSR_DBCM, FB_DACSR_DBCM)
+#define RM_DACSR_DBR                         RM(FM_DACSR_DBR, FB_DACSR_DBR)
+#define RM_DACSR_DBM                         RM(FM_DACSR_DBM, FB_DACSR_DBM)
+
+/* Register Values */
+#define RV_DACSR_DBCM_AUTO \
+	 RV(FV_DACSR_DBCM_AUTO, FB_DACSR_DBCM)
+
+#define RV_DACSR_DBCM_32 \
+	 RV(FV_DACSR_DBCM_32, FB_DACSR_DBCM)
+
+#define RV_DACSR_DBCM_40 \
+	 RV(FV_DACSR_DBCM_40, FB_DACSR_DBCM)
+
+#define RV_DACSR_DBCM_64 \
+	 RV(FV_DACSR_DBCM_64, FB_DACSR_DBCM)
+
+#define RV_DACSR_DBR_32                      RV(FV_DACSR_DBR_32, FB_DACSR_DBR)
+#define RV_DACSR_DBR_44_1 \
+	 RV(FV_DACSR_DBR_44_1, FB_DACSR_DBR)
+
+#define RV_DACSR_DBR_48                      RV(FV_DACSR_DBR_48, FB_DACSR_DBR)
+#define RV_DACSR_DBM_PT25 \
+	 RV(FV_DACSR_DBM_PT25, FB_DACSR_DBM)
+
+#define RV_DACSR_DBM_PT5                     RV(FV_DACSR_DBM_PT5, FB_DACSR_DBM)
+#define RV_DACSR_DBM_1                       RV(FV_DACSR_DBM_1, FB_DACSR_DBM)
+#define RV_DACSR_DBM_2                       RV(FV_DACSR_DBM_2, FB_DACSR_DBM)
+
+/****************************
+ *      R_PWRM1 (0x1A)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_PWRM1_BSTL                        7
+#define FB_PWRM1_BSTR                        6
+#define FB_PWRM1_PGAL                        5
+#define FB_PWRM1_PGAR                        4
+#define FB_PWRM1_ADCL                        3
+#define FB_PWRM1_ADCR                        2
+#define FB_PWRM1_MICB                        1
+#define FB_PWRM1_DIGENB                      0
+
+/* Field Masks */
+#define FM_PWRM1_BSTL                        0X1
+#define FM_PWRM1_BSTR                        0X1
+#define FM_PWRM1_PGAL                        0X1
+#define FM_PWRM1_PGAR                        0X1
+#define FM_PWRM1_ADCL                        0X1
+#define FM_PWRM1_ADCR                        0X1
+#define FM_PWRM1_MICB                        0X1
+#define FM_PWRM1_DIGENB                      0X1
+
+/* Field Values */
+#define FV_PWRM1_BSTL_ENABLE                 0x1
+#define FV_PWRM1_BSTL_DISABLE                0x0
+#define FV_PWRM1_BSTR_ENABLE                 0x1
+#define FV_PWRM1_BSTR_DISABLE                0x0
+#define FV_PWRM1_PGAL_ENABLE                 0x1
+#define FV_PWRM1_PGAL_DISABLE                0x0
+#define FV_PWRM1_PGAR_ENABLE                 0x1
+#define FV_PWRM1_PGAR_DISABLE                0x0
+#define FV_PWRM1_ADCL_ENABLE                 0x1
+#define FV_PWRM1_ADCL_DISABLE                0x0
+#define FV_PWRM1_ADCR_ENABLE                 0x1
+#define FV_PWRM1_ADCR_DISABLE                0x0
+#define FV_PWRM1_MICB_ENABLE                 0x1
+#define FV_PWRM1_MICB_DISABLE                0x0
+#define FV_PWRM1_DIGENB_DISABLE              0x1
+#define FV_PWRM1_DIGENB_ENABLE               0x0
+
+/* Register Masks */
+#define RM_PWRM1_BSTL                        RM(FM_PWRM1_BSTL, FB_PWRM1_BSTL)
+#define RM_PWRM1_BSTR                        RM(FM_PWRM1_BSTR, FB_PWRM1_BSTR)
+#define RM_PWRM1_PGAL                        RM(FM_PWRM1_PGAL, FB_PWRM1_PGAL)
+#define RM_PWRM1_PGAR                        RM(FM_PWRM1_PGAR, FB_PWRM1_PGAR)
+#define RM_PWRM1_ADCL                        RM(FM_PWRM1_ADCL, FB_PWRM1_ADCL)
+#define RM_PWRM1_ADCR                        RM(FM_PWRM1_ADCR, FB_PWRM1_ADCR)
+#define RM_PWRM1_MICB                        RM(FM_PWRM1_MICB, FB_PWRM1_MICB)
+#define RM_PWRM1_DIGENB \
+	 RM(FM_PWRM1_DIGENB, FB_PWRM1_DIGENB)
+
+
+/* Register Values */
+#define RV_PWRM1_BSTL_ENABLE \
+	 RV(FV_PWRM1_BSTL_ENABLE, FB_PWRM1_BSTL)
+
+#define RV_PWRM1_BSTL_DISABLE \
+	 RV(FV_PWRM1_BSTL_DISABLE, FB_PWRM1_BSTL)
+
+#define RV_PWRM1_BSTR_ENABLE \
+	 RV(FV_PWRM1_BSTR_ENABLE, FB_PWRM1_BSTR)
+
+#define RV_PWRM1_BSTR_DISABLE \
+	 RV(FV_PWRM1_BSTR_DISABLE, FB_PWRM1_BSTR)
+
+#define RV_PWRM1_PGAL_ENABLE \
+	 RV(FV_PWRM1_PGAL_ENABLE, FB_PWRM1_PGAL)
+
+#define RV_PWRM1_PGAL_DISABLE \
+	 RV(FV_PWRM1_PGAL_DISABLE, FB_PWRM1_PGAL)
+
+#define RV_PWRM1_PGAR_ENABLE \
+	 RV(FV_PWRM1_PGAR_ENABLE, FB_PWRM1_PGAR)
+
+#define RV_PWRM1_PGAR_DISABLE \
+	 RV(FV_PWRM1_PGAR_DISABLE, FB_PWRM1_PGAR)
+
+#define RV_PWRM1_ADCL_ENABLE \
+	 RV(FV_PWRM1_ADCL_ENABLE, FB_PWRM1_ADCL)
+
+#define RV_PWRM1_ADCL_DISABLE \
+	 RV(FV_PWRM1_ADCL_DISABLE, FB_PWRM1_ADCL)
+
+#define RV_PWRM1_ADCR_ENABLE \
+	 RV(FV_PWRM1_ADCR_ENABLE, FB_PWRM1_ADCR)
+
+#define RV_PWRM1_ADCR_DISABLE \
+	 RV(FV_PWRM1_ADCR_DISABLE, FB_PWRM1_ADCR)
+
+#define RV_PWRM1_MICB_ENABLE \
+	 RV(FV_PWRM1_MICB_ENABLE, FB_PWRM1_MICB)
+
+#define RV_PWRM1_MICB_DISABLE \
+	 RV(FV_PWRM1_MICB_DISABLE, FB_PWRM1_MICB)
+
+#define RV_PWRM1_DIGENB_DISABLE \
+	 RV(FV_PWRM1_DIGENB_DISABLE, FB_PWRM1_DIGENB)
+
+#define RV_PWRM1_DIGENB_ENABLE \
+	 RV(FV_PWRM1_DIGENB_ENABLE, FB_PWRM1_DIGENB)
+
+
+/****************************
+ *      R_PWRM2 (0x1B)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_PWRM2_D2S                         7
+#define FB_PWRM2_HPL                         6
+#define FB_PWRM2_HPR                         5
+#define FB_PWRM2_SPKL                        4
+#define FB_PWRM2_SPKR                        3
+#define FB_PWRM2_INSELL                      2
+#define FB_PWRM2_INSELR                      1
+#define FB_PWRM2_VREF                        0
+
+/* Field Masks */
+#define FM_PWRM2_D2S                         0X1
+#define FM_PWRM2_HPL                         0X1
+#define FM_PWRM2_HPR                         0X1
+#define FM_PWRM2_SPKL                        0X1
+#define FM_PWRM2_SPKR                        0X1
+#define FM_PWRM2_INSELL                      0X1
+#define FM_PWRM2_INSELR                      0X1
+#define FM_PWRM2_VREF                        0X1
+
+/* Field Values */
+#define FV_PWRM2_D2S_ENABLE                  0x1
+#define FV_PWRM2_D2S_DISABLE                 0x0
+#define FV_PWRM2_HPL_ENABLE                  0x1
+#define FV_PWRM2_HPL_DISABLE                 0x0
+#define FV_PWRM2_HPR_ENABLE                  0x1
+#define FV_PWRM2_HPR_DISABLE                 0x0
+#define FV_PWRM2_SPKL_ENABLE                 0x1
+#define FV_PWRM2_SPKL_DISABLE                0x0
+#define FV_PWRM2_SPKR_ENABLE                 0x1
+#define FV_PWRM2_SPKR_DISABLE                0x0
+#define FV_PWRM2_INSELL_ENABLE               0x1
+#define FV_PWRM2_INSELL_DISABLE              0x0
+#define FV_PWRM2_INSELR_ENABLE               0x1
+#define FV_PWRM2_INSELR_DISABLE              0x0
+#define FV_PWRM2_VREF_ENABLE                 0x1
+#define FV_PWRM2_VREF_DISABLE                0x0
+
+/* Register Masks */
+#define RM_PWRM2_D2S                         RM(FM_PWRM2_D2S, FB_PWRM2_D2S)
+#define RM_PWRM2_HPL                         RM(FM_PWRM2_HPL, FB_PWRM2_HPL)
+#define RM_PWRM2_HPR                         RM(FM_PWRM2_HPR, FB_PWRM2_HPR)
+#define RM_PWRM2_SPKL                        RM(FM_PWRM2_SPKL, FB_PWRM2_SPKL)
+#define RM_PWRM2_SPKR                        RM(FM_PWRM2_SPKR, FB_PWRM2_SPKR)
+#define RM_PWRM2_INSELL \
+	 RM(FM_PWRM2_INSELL, FB_PWRM2_INSELL)
+
+#define RM_PWRM2_INSELR \
+	 RM(FM_PWRM2_INSELR, FB_PWRM2_INSELR)
+
+#define RM_PWRM2_VREF                        RM(FM_PWRM2_VREF, FB_PWRM2_VREF)
+
+/* Register Values */
+#define RV_PWRM2_D2S_ENABLE \
+	 RV(FV_PWRM2_D2S_ENABLE, FB_PWRM2_D2S)
+
+#define RV_PWRM2_D2S_DISABLE \
+	 RV(FV_PWRM2_D2S_DISABLE, FB_PWRM2_D2S)
+
+#define RV_PWRM2_HPL_ENABLE \
+	 RV(FV_PWRM2_HPL_ENABLE, FB_PWRM2_HPL)
+
+#define RV_PWRM2_HPL_DISABLE \
+	 RV(FV_PWRM2_HPL_DISABLE, FB_PWRM2_HPL)
+
+#define RV_PWRM2_HPR_ENABLE \
+	 RV(FV_PWRM2_HPR_ENABLE, FB_PWRM2_HPR)
+
+#define RV_PWRM2_HPR_DISABLE \
+	 RV(FV_PWRM2_HPR_DISABLE, FB_PWRM2_HPR)
+
+#define RV_PWRM2_SPKL_ENABLE \
+	 RV(FV_PWRM2_SPKL_ENABLE, FB_PWRM2_SPKL)
+
+#define RV_PWRM2_SPKL_DISABLE \
+	 RV(FV_PWRM2_SPKL_DISABLE, FB_PWRM2_SPKL)
+
+#define RV_PWRM2_SPKR_ENABLE \
+	 RV(FV_PWRM2_SPKR_ENABLE, FB_PWRM2_SPKR)
+
+#define RV_PWRM2_SPKR_DISABLE \
+	 RV(FV_PWRM2_SPKR_DISABLE, FB_PWRM2_SPKR)
+
+#define RV_PWRM2_INSELL_ENABLE \
+	 RV(FV_PWRM2_INSELL_ENABLE, FB_PWRM2_INSELL)
+
+#define RV_PWRM2_INSELL_DISABLE \
+	 RV(FV_PWRM2_INSELL_DISABLE, FB_PWRM2_INSELL)
+
+#define RV_PWRM2_INSELR_ENABLE \
+	 RV(FV_PWRM2_INSELR_ENABLE, FB_PWRM2_INSELR)
+
+#define RV_PWRM2_INSELR_DISABLE \
+	 RV(FV_PWRM2_INSELR_DISABLE, FB_PWRM2_INSELR)
+
+#define RV_PWRM2_VREF_ENABLE \
+	 RV(FV_PWRM2_VREF_ENABLE, FB_PWRM2_VREF)
+
+#define RV_PWRM2_VREF_DISABLE \
+	 RV(FV_PWRM2_VREF_DISABLE, FB_PWRM2_VREF)
+
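+/*
+ * Illustrative only: powering up the headphone playback path would typically
+ * mean enabling VREF together with the left/right headphone drivers in
+ * R_PWRM2 while leaving the remaining power bits untouched. A sketch,
+ * assuming the R_PWRM2 address macro defined earlier in this header and the
+ * codec's regmap handle:
+ *
+ *	regmap_update_bits(regmap, R_PWRM2,
+ *			   RM_PWRM2_VREF | RM_PWRM2_HPL | RM_PWRM2_HPR,
+ *			   RV_PWRM2_VREF_ENABLE | RV_PWRM2_HPL_ENABLE |
+ *			   RV_PWRM2_HPR_ENABLE);
+ */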
+
+/******************************
+ *      R_CONFIG0 (0x1F)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CONFIG0_ASDM                      6
+#define FB_CONFIG0_DSDM                      4
+#define FB_CONFIG0_DC_BYPASS                 1
+#define FB_CONFIG0_SD_FORCE_ON               0
+
+/* Field Masks */
+#define FM_CONFIG0_ASDM                      0X3
+#define FM_CONFIG0_DSDM                      0X3
+#define FM_CONFIG0_DC_BYPASS                 0X1
+#define FM_CONFIG0_SD_FORCE_ON               0X1
+
+/* Field Values */
+#define FV_CONFIG0_ASDM_HALF                 0x1
+#define FV_CONFIG0_ASDM_FULL                 0x2
+#define FV_CONFIG0_ASDM_AUTO                 0x3
+#define FV_CONFIG0_DSDM_HALF                 0x1
+#define FV_CONFIG0_DSDM_FULL                 0x2
+#define FV_CONFIG0_DSDM_AUTO                 0x3
+#define FV_CONFIG0_DC_BYPASS_ENABLE          0x1
+#define FV_CONFIG0_DC_BYPASS_DISABLE         0x0
+#define FV_CONFIG0_SD_FORCE_ON_ENABLE        0x1
+#define FV_CONFIG0_SD_FORCE_ON_DISABLE       0x0
+
+/* Register Masks */
+#define RM_CONFIG0_ASDM \
+	 RM(FM_CONFIG0_ASDM, FB_CONFIG0_ASDM)
+
+#define RM_CONFIG0_DSDM \
+	 RM(FM_CONFIG0_DSDM, FB_CONFIG0_DSDM)
+
+#define RM_CONFIG0_DC_BYPASS \
+	 RM(FM_CONFIG0_DC_BYPASS, FB_CONFIG0_DC_BYPASS)
+
+#define RM_CONFIG0_SD_FORCE_ON \
+	 RM(FM_CONFIG0_SD_FORCE_ON, FB_CONFIG0_SD_FORCE_ON)
+
+
+/* Register Values */
+#define RV_CONFIG0_ASDM_HALF \
+	 RV(FV_CONFIG0_ASDM_HALF, FB_CONFIG0_ASDM)
+
+#define RV_CONFIG0_ASDM_FULL \
+	 RV(FV_CONFIG0_ASDM_FULL, FB_CONFIG0_ASDM)
+
+#define RV_CONFIG0_ASDM_AUTO \
+	 RV(FV_CONFIG0_ASDM_AUTO, FB_CONFIG0_ASDM)
+
+#define RV_CONFIG0_DSDM_HALF \
+	 RV(FV_CONFIG0_DSDM_HALF, FB_CONFIG0_DSDM)
+
+#define RV_CONFIG0_DSDM_FULL \
+	 RV(FV_CONFIG0_DSDM_FULL, FB_CONFIG0_DSDM)
+
+#define RV_CONFIG0_DSDM_AUTO \
+	 RV(FV_CONFIG0_DSDM_AUTO, FB_CONFIG0_DSDM)
+
+#define RV_CONFIG0_DC_BYPASS_ENABLE \
+	 RV(FV_CONFIG0_DC_BYPASS_ENABLE, FB_CONFIG0_DC_BYPASS)
+
+#define RV_CONFIG0_DC_BYPASS_DISABLE \
+	 RV(FV_CONFIG0_DC_BYPASS_DISABLE, FB_CONFIG0_DC_BYPASS)
+
+#define RV_CONFIG0_SD_FORCE_ON_ENABLE \
+	 RV(FV_CONFIG0_SD_FORCE_ON_ENABLE, FB_CONFIG0_SD_FORCE_ON)
+
+#define RV_CONFIG0_SD_FORCE_ON_DISABLE \
+	 RV(FV_CONFIG0_SD_FORCE_ON_DISABLE, FB_CONFIG0_SD_FORCE_ON)
+
+
+/******************************
+ *      R_CONFIG1 (0x20)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CONFIG1_EQ2_EN                    7
+#define FB_CONFIG1_EQ2_BE                    4
+#define FB_CONFIG1_EQ1_EN                    3
+#define FB_CONFIG1_EQ1_BE                    0
+
+/* Field Masks */
+#define FM_CONFIG1_EQ2_EN                    0X1
+#define FM_CONFIG1_EQ2_BE                    0X7
+#define FM_CONFIG1_EQ1_EN                    0X1
+#define FM_CONFIG1_EQ1_BE                    0X7
+
+/* Field Values */
+#define FV_CONFIG1_EQ2_EN_ENABLE             0x1
+#define FV_CONFIG1_EQ2_EN_DISABLE            0x0
+#define FV_CONFIG1_EQ2_BE_PRE                0x0
+#define FV_CONFIG1_EQ2_BE_PRE_EQ_0           0x1
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_1          0x2
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_2          0x3
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_3          0x4
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_4          0x5
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_5          0x6
+#define FV_CONFIG1_EQ1_EN_ENABLE             0x1
+#define FV_CONFIG1_EQ1_EN_DISABLE            0x0
+#define FV_CONFIG1_EQ1_BE_PRE                0x0
+#define FV_CONFIG1_EQ1_BE_PRE_EQ_0           0x1
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_1          0x2
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_2          0x3
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_3          0x4
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_4          0x5
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_5          0x6
+
+/* Register Masks */
+#define RM_CONFIG1_EQ2_EN \
+	 RM(FM_CONFIG1_EQ2_EN, FB_CONFIG1_EQ2_EN)
+
+#define RM_CONFIG1_EQ2_BE \
+	 RM(FM_CONFIG1_EQ2_BE, FB_CONFIG1_EQ2_BE)
+
+#define RM_CONFIG1_EQ1_EN \
+	 RM(FM_CONFIG1_EQ1_EN, FB_CONFIG1_EQ1_EN)
+
+#define RM_CONFIG1_EQ1_BE \
+	 RM(FM_CONFIG1_EQ1_BE, FB_CONFIG1_EQ1_BE)
+
+
+/* Register Values */
+#define RV_CONFIG1_EQ2_EN_ENABLE \
+	 RV(FV_CONFIG1_EQ2_EN_ENABLE, FB_CONFIG1_EQ2_EN)
+
+#define RV_CONFIG1_EQ2_EN_DISABLE \
+	 RV(FV_CONFIG1_EQ2_EN_DISABLE, FB_CONFIG1_EQ2_EN)
+
+#define RV_CONFIG1_EQ2_BE_PRE \
+	 RV(FV_CONFIG1_EQ2_BE_PRE, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ_0 \
+	 RV(FV_CONFIG1_EQ2_BE_PRE_EQ_0, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_1 \
+	 RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_1, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_2 \
+	 RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_2, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_3 \
+	 RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_3, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_4 \
+	 RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_4, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_5 \
+	 RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_5, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ1_EN_ENABLE \
+	 RV(FV_CONFIG1_EQ1_EN_ENABLE, FB_CONFIG1_EQ1_EN)
+
+#define RV_CONFIG1_EQ1_EN_DISABLE \
+	 RV(FV_CONFIG1_EQ1_EN_DISABLE, FB_CONFIG1_EQ1_EN)
+
+#define RV_CONFIG1_EQ1_BE_PRE \
+	 RV(FV_CONFIG1_EQ1_BE_PRE, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ_0 \
+	 RV(FV_CONFIG1_EQ1_BE_PRE_EQ_0, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_1 \
+	 RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_1, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_2 \
+	 RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_2, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_3 \
+	 RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_3, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_4 \
+	 RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_4, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_5 \
+	 RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_5, FB_CONFIG1_EQ1_BE)
+
+
+/******************************
+ *      R_DMICCTL (0x24)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_DMICCTL_DMICEN                    7
+#define FB_DMICCTL_DMONO                     4
+#define FB_DMICCTL_DMPHADJ                   2
+#define FB_DMICCTL_DMRATE                    0
+
+/* Field Masks */
+#define FM_DMICCTL_DMICEN                    0X1
+#define FM_DMICCTL_DMONO                     0X1
+#define FM_DMICCTL_DMPHADJ                   0X3
+#define FM_DMICCTL_DMRATE                    0X3
+
+/* Field Values */
+#define FV_DMICCTL_DMICEN_ENABLE             0x1
+#define FV_DMICCTL_DMICEN_DISABLE            0x0
+#define FV_DMICCTL_DMONO_STEREO              0x0
+#define FV_DMICCTL_DMONO_MONO                0x1
+
+/* Register Masks */
+#define RM_DMICCTL_DMICEN \
+	 RM(FM_DMICCTL_DMICEN, FB_DMICCTL_DMICEN)
+
+#define RM_DMICCTL_DMONO \
+	 RM(FM_DMICCTL_DMONO, FB_DMICCTL_DMONO)
+
+#define RM_DMICCTL_DMPHADJ \
+	 RM(FM_DMICCTL_DMPHADJ, FB_DMICCTL_DMPHADJ)
+
+#define RM_DMICCTL_DMRATE \
+	 RM(FM_DMICCTL_DMRATE, FB_DMICCTL_DMRATE)
+
+
+/* Register Values */
+#define RV_DMICCTL_DMICEN_ENABLE \
+	 RV(FV_DMICCTL_DMICEN_ENABLE, FB_DMICCTL_DMICEN)
+
+#define RV_DMICCTL_DMICEN_DISABLE \
+	 RV(FV_DMICCTL_DMICEN_DISABLE, FB_DMICCTL_DMICEN)
+
+#define RV_DMICCTL_DMONO_STEREO \
+	 RV(FV_DMICCTL_DMONO_STEREO, FB_DMICCTL_DMONO)
+
+#define RV_DMICCTL_DMONO_MONO \
+	 RV(FV_DMICCTL_DMONO_MONO, FB_DMICCTL_DMONO)
+
+
+/*****************************
+ *      R_CLECTL (0x25)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_CLECTL_LVL_MODE                   4
+#define FB_CLECTL_WINDOWSEL                  3
+#define FB_CLECTL_EXP_EN                     2
+#define FB_CLECTL_LIMIT_EN                   1
+#define FB_CLECTL_COMP_EN                    0
+
+/* Field Masks */
+#define FM_CLECTL_LVL_MODE                   0X1
+#define FM_CLECTL_WINDOWSEL                  0X1
+#define FM_CLECTL_EXP_EN                     0X1
+#define FM_CLECTL_LIMIT_EN                   0X1
+#define FM_CLECTL_COMP_EN                    0X1
+
+/* Field Values */
+#define FV_CLECTL_LVL_MODE_AVG               0x0
+#define FV_CLECTL_LVL_MODE_PEAK              0x1
+#define FV_CLECTL_WINDOWSEL_512              0x0
+#define FV_CLECTL_WINDOWSEL_64               0x1
+#define FV_CLECTL_EXP_EN_ENABLE              0x1
+#define FV_CLECTL_EXP_EN_DISABLE             0x0
+#define FV_CLECTL_LIMIT_EN_ENABLE            0x1
+#define FV_CLECTL_LIMIT_EN_DISABLE           0x0
+#define FV_CLECTL_COMP_EN_ENABLE             0x1
+#define FV_CLECTL_COMP_EN_DISABLE            0x0
+
+/* Register Masks */
+#define RM_CLECTL_LVL_MODE \
+	 RM(FM_CLECTL_LVL_MODE, FB_CLECTL_LVL_MODE)
+
+#define RM_CLECTL_WINDOWSEL \
+	 RM(FM_CLECTL_WINDOWSEL, FB_CLECTL_WINDOWSEL)
+
+#define RM_CLECTL_EXP_EN \
+	 RM(FM_CLECTL_EXP_EN, FB_CLECTL_EXP_EN)
+
+#define RM_CLECTL_LIMIT_EN \
+	 RM(FM_CLECTL_LIMIT_EN, FB_CLECTL_LIMIT_EN)
+
+#define RM_CLECTL_COMP_EN \
+	 RM(FM_CLECTL_COMP_EN, FB_CLECTL_COMP_EN)
+
+
+/* Register Values */
+#define RV_CLECTL_LVL_MODE_AVG \
+	 RV(FV_CLECTL_LVL_MODE_AVG, FB_CLECTL_LVL_MODE)
+
+#define RV_CLECTL_LVL_MODE_PEAK \
+	 RV(FV_CLECTL_LVL_MODE_PEAK, FB_CLECTL_LVL_MODE)
+
+#define RV_CLECTL_WINDOWSEL_512 \
+	 RV(FV_CLECTL_WINDOWSEL_512, FB_CLECTL_WINDOWSEL)
+
+#define RV_CLECTL_WINDOWSEL_64 \
+	 RV(FV_CLECTL_WINDOWSEL_64, FB_CLECTL_WINDOWSEL)
+
+#define RV_CLECTL_EXP_EN_ENABLE \
+	 RV(FV_CLECTL_EXP_EN_ENABLE, FB_CLECTL_EXP_EN)
+
+#define RV_CLECTL_EXP_EN_DISABLE \
+	 RV(FV_CLECTL_EXP_EN_DISABLE, FB_CLECTL_EXP_EN)
+
+#define RV_CLECTL_LIMIT_EN_ENABLE \
+	 RV(FV_CLECTL_LIMIT_EN_ENABLE, FB_CLECTL_LIMIT_EN)
+
+#define RV_CLECTL_LIMIT_EN_DISABLE \
+	 RV(FV_CLECTL_LIMIT_EN_DISABLE, FB_CLECTL_LIMIT_EN)
+
+#define RV_CLECTL_COMP_EN_ENABLE \
+	 RV(FV_CLECTL_COMP_EN_ENABLE, FB_CLECTL_COMP_EN)
+
+#define RV_CLECTL_COMP_EN_DISABLE \
+	 RV(FV_CLECTL_COMP_EN_DISABLE, FB_CLECTL_COMP_EN)
+
+
+/*****************************
+ *      R_MUGAIN (0x26)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_MUGAIN_CLEMUG                     0
+
+/* Field Masks */
+#define FM_MUGAIN_CLEMUG                     0X1F
+
+/* Field Values */
+#define FV_MUGAIN_CLEMUG_46PT5DB             0x1F
+#define FV_MUGAIN_CLEMUG_0DB                 0x0
+
+/* Register Masks */
+#define RM_MUGAIN_CLEMUG \
+	 RM(FM_MUGAIN_CLEMUG, FB_MUGAIN_CLEMUG)
+
+
+/* Register Values */
+#define RV_MUGAIN_CLEMUG_46PT5DB \
+	 RV(FV_MUGAIN_CLEMUG_46PT5DB, FB_MUGAIN_CLEMUG)
+
+#define RV_MUGAIN_CLEMUG_0DB \
+	 RV(FV_MUGAIN_CLEMUG_0DB, FB_MUGAIN_CLEMUG)
+
+
+/*****************************
+ *      R_COMPTH (0x27)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_COMPTH                            0
+
+/* Field Masks */
+#define FM_COMPTH                            0XFF
+
+/* Field Values */
+#define FV_COMPTH_0DB                        0xFF
+#define FV_COMPTH_N95PT625DB                 0x0
+
+/* Register Masks */
+#define RM_COMPTH                            RM(FM_COMPTH, FB_COMPTH)
+
+/* Register Values */
+#define RV_COMPTH_0DB                        RV(FV_COMPTH_0DB, FB_COMPTH)
+#define RV_COMPTH_N95PT625DB \
+	 RV(FV_COMPTH_N95PT625DB, FB_COMPTH)
+
+
+/*****************************
+ *      R_CMPRAT (0x28)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_CMPRAT                            0
+
+/* Field Masks */
+#define FM_CMPRAT                            0X1F
+
+/* Register Masks */
+#define RM_CMPRAT                            RM(FM_CMPRAT, FB_CMPRAT)
+
+/******************************
+ *      R_CATKTCL (0x29)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CATKTCL                           0
+
+/* Field Masks */
+#define FM_CATKTCL                           0XFF
+
+/* Register Masks */
+#define RM_CATKTCL                           RM(FM_CATKTCL, FB_CATKTCL)
+
+/******************************
+ *      R_CATKTCH (0x2A)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CATKTCH                           0
+
+/* Field Masks */
+#define FM_CATKTCH                           0XFF
+
+/* Register Masks */
+#define RM_CATKTCH                           RM(FM_CATKTCH, FB_CATKTCH)
+
+/******************************
+ *      R_CRELTCL (0x2B)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CRELTCL                           0
+
+/* Field Masks */
+#define FM_CRELTCL                           0XFF
+
+/* Register Masks */
+#define RM_CRELTCL                           RM(FM_CRELTCL, FB_CRELTCL)
+
+/******************************
+ *      R_CRELTCH (0x2C)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CRELTCH                           0
+
+/* Field Masks */
+#define FM_CRELTCH                           0XFF
+
+/* Register Masks */
+#define RM_CRELTCH                           RM(FM_CRELTCH, FB_CRELTCH)
+
+/****************************
+ *      R_LIMTH (0x2D)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_LIMTH                             0
+
+/* Field Masks */
+#define FM_LIMTH                             0XFF
+
+/* Field Values */
+#define FV_LIMTH_0DB                         0xFF
+#define FV_LIMTH_N95PT625DB                  0x0
+
+/* Register Masks */
+#define RM_LIMTH                             RM(FM_LIMTH, FB_LIMTH)
+
+/* Register Values */
+#define RV_LIMTH_0DB                         RV(FV_LIMTH_0DB, FB_LIMTH)
+#define RV_LIMTH_N95PT625DB                  RV(FV_LIMTH_N95PT625DB, FB_LIMTH)
+
+/*****************************
+ *      R_LIMTGT (0x2E)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_LIMTGT                            0
+
+/* Field Masks */
+#define FM_LIMTGT                            0XFF
+
+/* Field Values */
+#define FV_LIMTGT_0DB                        0xFF
+#define FV_LIMTGT_N95PT625DB                 0x0
+
+/* Register Masks */
+#define RM_LIMTGT                            RM(FM_LIMTGT, FB_LIMTGT)
+
+/* Register Values */
+#define RV_LIMTGT_0DB                        RV(FV_LIMTGT_0DB, FB_LIMTGT)
+#define RV_LIMTGT_N95PT625DB \
+	 RV(FV_LIMTGT_N95PT625DB, FB_LIMTGT)
+
+
+/******************************
+ *      R_LATKTCL (0x2F)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_LATKTCL                           0
+
+/* Field Masks */
+#define FM_LATKTCL                           0XFF
+
+/* Register Masks */
+#define RM_LATKTCL                           RM(FM_LATKTCL, FB_LATKTCL)
+
+/******************************
+ *      R_LATKTCH (0x30)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_LATKTCH                           0
+
+/* Field Masks */
+#define FM_LATKTCH                           0XFF
+
+/* Register Masks */
+#define RM_LATKTCH                           RM(FM_LATKTCH, FB_LATKTCH)
+
+/******************************
+ *      R_LRELTCL (0x31)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_LRELTCL                           0
+
+/* Field Masks */
+#define FM_LRELTCL                           0XFF
+
+/* Register Masks */
+#define RM_LRELTCL                           RM(FM_LRELTCL, FB_LRELTCL)
+
+/******************************
+ *      R_LRELTCH (0x32)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_LRELTCH                           0
+
+/* Field Masks */
+#define FM_LRELTCH                           0XFF
+
+/* Register Masks */
+#define RM_LRELTCH                           RM(FM_LRELTCH, FB_LRELTCH)
+
+/****************************
+ *      R_EXPTH (0x33)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_EXPTH                             0
+
+/* Field Masks */
+#define FM_EXPTH                             0XFF
+
+/* Field Values */
+#define FV_EXPTH_0DB                         0xFF
+#define FV_EXPTH_N95PT625DB                  0x0
+
+/* Register Masks */
+#define RM_EXPTH                             RM(FM_EXPTH, FB_EXPTH)
+
+/* Register Values */
+#define RV_EXPTH_0DB                         RV(FV_EXPTH_0DB, FB_EXPTH)
+#define RV_EXPTH_N95PT625DB                  RV(FV_EXPTH_N95PT625DB, FB_EXPTH)
+
+/*****************************
+ *      R_EXPRAT (0x34)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_EXPRAT                            0
+
+/* Field Masks */
+#define FM_EXPRAT                            0X7
+
+/* Register Masks */
+#define RM_EXPRAT                            RM(FM_EXPRAT, FB_EXPRAT)
+
+/******************************
+ *      R_XATKTCL (0x35)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_XATKTCL                           0
+
+/* Field Masks */
+#define FM_XATKTCL                           0XFF
+
+/* Register Masks */
+#define RM_XATKTCL                           RM(FM_XATKTCL, FB_XATKTCL)
+
+/******************************
+ *      R_XATKTCH (0x36)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_XATKTCH                           0
+
+/* Field Masks */
+#define FM_XATKTCH                           0XFF
+
+/* Register Masks */
+#define RM_XATKTCH                           RM(FM_XATKTCH, FB_XATKTCH)
+
+/******************************
+ *      R_XRELTCL (0x37)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_XRELTCL                           0
+
+/* Field Masks */
+#define FM_XRELTCL                           0XFF
+
+/* Register Masks */
+#define RM_XRELTCL                           RM(FM_XRELTCL, FB_XRELTCL)
+
+/******************************
+ *      R_XRELTCH (0x38)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_XRELTCH                           0
+
+/* Field Masks */
+#define FM_XRELTCH                           0XFF
+
+/* Register Masks */
+#define RM_XRELTCH                           RM(FM_XRELTCH, FB_XRELTCH)
+
+/****************************
+ *      R_FXCTL (0x39)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_FXCTL_3DEN                        4
+#define FB_FXCTL_TEEN                        3
+#define FB_FXCTL_TNLFBYPASS                  2
+#define FB_FXCTL_BEEN                        1
+#define FB_FXCTL_BNLFBYPASS                  0
+
+/* Field Masks */
+#define FM_FXCTL_3DEN                        0X1
+#define FM_FXCTL_TEEN                        0X1
+#define FM_FXCTL_TNLFBYPASS                  0X1
+#define FM_FXCTL_BEEN                        0X1
+#define FM_FXCTL_BNLFBYPASS                  0X1
+
+/* Field Values */
+#define FV_FXCTL_3DEN_ENABLE                 0x1
+#define FV_FXCTL_3DEN_DISABLE                0x0
+#define FV_FXCTL_TEEN_ENABLE                 0x1
+#define FV_FXCTL_TEEN_DISABLE                0x0
+#define FV_FXCTL_TNLFBYPASS_ENABLE           0x1
+#define FV_FXCTL_TNLFBYPASS_DISABLE          0x0
+#define FV_FXCTL_BEEN_ENABLE                 0x1
+#define FV_FXCTL_BEEN_DISABLE                0x0
+#define FV_FXCTL_BNLFBYPASS_ENABLE           0x1
+#define FV_FXCTL_BNLFBYPASS_DISABLE          0x0
+
+/* Register Masks */
+#define RM_FXCTL_3DEN                        RM(FM_FXCTL_3DEN, FB_FXCTL_3DEN)
+#define RM_FXCTL_TEEN                        RM(FM_FXCTL_TEEN, FB_FXCTL_TEEN)
+#define RM_FXCTL_TNLFBYPASS \
+	 RM(FM_FXCTL_TNLFBYPASS, FB_FXCTL_TNLFBYPASS)
+
+#define RM_FXCTL_BEEN                        RM(FM_FXCTL_BEEN, FB_FXCTL_BEEN)
+#define RM_FXCTL_BNLFBYPASS \
+	 RM(FM_FXCTL_BNLFBYPASS, FB_FXCTL_BNLFBYPASS)
+
+
+/* Register Values */
+#define RV_FXCTL_3DEN_ENABLE \
+	 RV(FV_FXCTL_3DEN_ENABLE, FB_FXCTL_3DEN)
+
+#define RV_FXCTL_3DEN_DISABLE \
+	 RV(FV_FXCTL_3DEN_DISABLE, FB_FXCTL_3DEN)
+
+#define RV_FXCTL_TEEN_ENABLE \
+	 RV(FV_FXCTL_TEEN_ENABLE, FB_FXCTL_TEEN)
+
+#define RV_FXCTL_TEEN_DISABLE \
+	 RV(FV_FXCTL_TEEN_DISABLE, FB_FXCTL_TEEN)
+
+#define RV_FXCTL_TNLFBYPASS_ENABLE \
+	 RV(FV_FXCTL_TNLFBYPASS_ENABLE, FB_FXCTL_TNLFBYPASS)
+
+#define RV_FXCTL_TNLFBYPASS_DISABLE \
+	 RV(FV_FXCTL_TNLFBYPASS_DISABLE, FB_FXCTL_TNLFBYPASS)
+
+#define RV_FXCTL_BEEN_ENABLE \
+	 RV(FV_FXCTL_BEEN_ENABLE, FB_FXCTL_BEEN)
+
+#define RV_FXCTL_BEEN_DISABLE \
+	 RV(FV_FXCTL_BEEN_DISABLE, FB_FXCTL_BEEN)
+
+#define RV_FXCTL_BNLFBYPASS_ENABLE \
+	 RV(FV_FXCTL_BNLFBYPASS_ENABLE, FB_FXCTL_BNLFBYPASS)
+
+#define RV_FXCTL_BNLFBYPASS_DISABLE \
+	 RV(FV_FXCTL_BNLFBYPASS_DISABLE, FB_FXCTL_BNLFBYPASS)
+
+
+/*******************************
+ *      R_DACCRWRL (0x3A)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRWRL_DACCRWDL                 0
+
+/* Field Masks */
+#define FM_DACCRWRL_DACCRWDL                 0XFF
+
+/* Register Masks */
+#define RM_DACCRWRL_DACCRWDL \
+	 RM(FM_DACCRWRL_DACCRWDL, FB_DACCRWRL_DACCRWDL)
+
+
+/*******************************
+ *      R_DACCRWRM (0x3B)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRWRM_DACCRWDM                 0
+
+/* Field Masks */
+#define FM_DACCRWRM_DACCRWDM                 0XFF
+
+/* Register Masks */
+#define RM_DACCRWRM_DACCRWDM \
+	 RM(FM_DACCRWRM_DACCRWDM, FB_DACCRWRM_DACCRWDM)
+
+
+/*******************************
+ *      R_DACCRWRH (0x3C)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRWRH_DACCRWDH                 0
+
+/* Field Masks */
+#define FM_DACCRWRH_DACCRWDH                 0XFF
+
+/* Register Masks */
+#define RM_DACCRWRH_DACCRWDH \
+	 RM(FM_DACCRWRH_DACCRWDH, FB_DACCRWRH_DACCRWDH)
+
+
+/*******************************
+ *      R_DACCRRDL (0x3D)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRRDL                          0
+
+/* Field Masks */
+#define FM_DACCRRDL                          0XFF
+
+/* Register Masks */
+#define RM_DACCRRDL                          RM(FM_DACCRRDL, FB_DACCRRDL)
+
+/*******************************
+ *      R_DACCRRDM (0x3E)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRRDM                          0
+
+/* Field Masks */
+#define FM_DACCRRDM                          0XFF
+
+/* Register Masks */
+#define RM_DACCRRDM                          RM(FM_DACCRRDM, FB_DACCRRDM)
+
+/*******************************
+ *      R_DACCRRDH (0x3F)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRRDH                          0
+
+/* Field Masks */
+#define FM_DACCRRDH                          0XFF
+
+/* Register Masks */
+#define RM_DACCRRDH                          RM(FM_DACCRRDH, FB_DACCRRDH)
+
+/********************************
+ *      R_DACCRADDR (0x40)      *
+ ********************************/
+
+/* Field Offsets */
+#define FB_DACCRADDR_DACCRADD                0
+
+/* Field Masks */
+#define FM_DACCRADDR_DACCRADD                0XFF
+
+/* Register Masks */
+#define RM_DACCRADDR_DACCRADD \
+	 RM(FM_DACCRADDR_DACCRADD, FB_DACCRADDR_DACCRADD)
+
+
+/******************************
+ *      R_DCOFSEL (0x41)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_DCOFSEL_DC_COEF_SEL               0
+
+/* Field Masks */
+#define FM_DCOFSEL_DC_COEF_SEL               0X7
+
+/* Field Values */
+#define FV_DCOFSEL_DC_COEF_SEL_2_N8          0x0
+#define FV_DCOFSEL_DC_COEF_SEL_2_N9          0x1
+#define FV_DCOFSEL_DC_COEF_SEL_2_N10         0x2
+#define FV_DCOFSEL_DC_COEF_SEL_2_N11         0x3
+#define FV_DCOFSEL_DC_COEF_SEL_2_N12         0x4
+#define FV_DCOFSEL_DC_COEF_SEL_2_N13         0x5
+#define FV_DCOFSEL_DC_COEF_SEL_2_N14         0x6
+#define FV_DCOFSEL_DC_COEF_SEL_2_N15         0x7
+
+/* Register Masks */
+#define RM_DCOFSEL_DC_COEF_SEL \
+	 RM(FM_DCOFSEL_DC_COEF_SEL, FB_DCOFSEL_DC_COEF_SEL)
+
+
+/* Register Values */
+#define RV_DCOFSEL_DC_COEF_SEL_2_N8 \
+	 RV(FV_DCOFSEL_DC_COEF_SEL_2_N8, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N9 \
+	 RV(FV_DCOFSEL_DC_COEF_SEL_2_N9, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N10 \
+	 RV(FV_DCOFSEL_DC_COEF_SEL_2_N10, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N11 \
+	 RV(FV_DCOFSEL_DC_COEF_SEL_2_N11, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N12 \
+	 RV(FV_DCOFSEL_DC_COEF_SEL_2_N12, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N13 \
+	 RV(FV_DCOFSEL_DC_COEF_SEL_2_N13, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N14 \
+	 RV(FV_DCOFSEL_DC_COEF_SEL_2_N14, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N15 \
+	 RV(FV_DCOFSEL_DC_COEF_SEL_2_N15, FB_DCOFSEL_DC_COEF_SEL)
+
+
+/******************************
+ *      R_PLLCTL9 (0x4E)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL9_REFDIV_PLL1               0
+
+/* Field Masks */
+#define FM_PLLCTL9_REFDIV_PLL1               0XFF
+
+/* Register Masks */
+#define RM_PLLCTL9_REFDIV_PLL1 \
+	 RM(FM_PLLCTL9_REFDIV_PLL1, FB_PLLCTL9_REFDIV_PLL1)
+
+
+/******************************
+ *      R_PLLCTLA (0x4F)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLA_OUTDIV_PLL1               0
+
+/* Field Masks */
+#define FM_PLLCTLA_OUTDIV_PLL1               0XFF
+
+/* Register Masks */
+#define RM_PLLCTLA_OUTDIV_PLL1 \
+	 RM(FM_PLLCTLA_OUTDIV_PLL1, FB_PLLCTLA_OUTDIV_PLL1)
+
+
+/******************************
+ *      R_PLLCTLB (0x50)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLB_FBDIV_PLL1L               0
+
+/* Field Masks */
+#define FM_PLLCTLB_FBDIV_PLL1L               0XFF
+
+/* Register Masks */
+#define RM_PLLCTLB_FBDIV_PLL1L \
+	 RM(FM_PLLCTLB_FBDIV_PLL1L, FB_PLLCTLB_FBDIV_PLL1L)
+
+
+/******************************
+ *      R_PLLCTLC (0x51)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLC_FBDIV_PLL1H               0
+
+/* Field Masks */
+#define FM_PLLCTLC_FBDIV_PLL1H               0X7
+
+/* Register Masks */
+#define RM_PLLCTLC_FBDIV_PLL1H \
+	 RM(FM_PLLCTLC_FBDIV_PLL1H, FB_PLLCTLC_FBDIV_PLL1H)
+
+
+/******************************
+ *      R_PLLCTLD (0x52)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLD_RZ_PLL1                   3
+#define FB_PLLCTLD_CP_PLL1                   0
+
+/* Field Masks */
+#define FM_PLLCTLD_RZ_PLL1                   0X7
+#define FM_PLLCTLD_CP_PLL1                   0X7
+
+/* Register Masks */
+#define RM_PLLCTLD_RZ_PLL1 \
+	 RM(FM_PLLCTLD_RZ_PLL1, FB_PLLCTLD_RZ_PLL1)
+
+#define RM_PLLCTLD_CP_PLL1 \
+	 RM(FM_PLLCTLD_CP_PLL1, FB_PLLCTLD_CP_PLL1)
+
+
+/******************************
+ *      R_PLLCTLE (0x53)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLE_REFDIV_PLL2               0
+
+/* Field Masks */
+#define FM_PLLCTLE_REFDIV_PLL2               0XFF
+
+/* Register Masks */
+#define RM_PLLCTLE_REFDIV_PLL2 \
+	 RM(FM_PLLCTLE_REFDIV_PLL2, FB_PLLCTLE_REFDIV_PLL2)
+
+
+/******************************
+ *      R_PLLCTLF (0x54)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLF_OUTDIV_PLL2               0
+
+/* Field Masks */
+#define FM_PLLCTLF_OUTDIV_PLL2               0XFF
+
+/* Register Masks */
+#define RM_PLLCTLF_OUTDIV_PLL2 \
+	 RM(FM_PLLCTLF_OUTDIV_PLL2, FB_PLLCTLF_OUTDIV_PLL2)
+
+
+/*******************************
+ *      R_PLLCTL10 (0x55)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL10_FBDIV_PLL2L              0
+
+/* Field Masks */
+#define FM_PLLCTL10_FBDIV_PLL2L              0XFF
+
+/* Register Masks */
+#define RM_PLLCTL10_FBDIV_PLL2L \
+	 RM(FM_PLLCTL10_FBDIV_PLL2L, FB_PLLCTL10_FBDIV_PLL2L)
+
+
+/*******************************
+ *      R_PLLCTL11 (0x56)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL11_FBDIV_PLL2H              0
+
+/* Field Masks */
+#define FM_PLLCTL11_FBDIV_PLL2H              0X7
+
+/* Register Masks */
+#define RM_PLLCTL11_FBDIV_PLL2H \
+	 RM(FM_PLLCTL11_FBDIV_PLL2H, FB_PLLCTL11_FBDIV_PLL2H)
+
+
+/*******************************
+ *      R_PLLCTL12 (0x57)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL12_RZ_PLL2                  3
+#define FB_PLLCTL12_CP_PLL2                  0
+
+/* Field Masks */
+#define FM_PLLCTL12_RZ_PLL2                  0X7
+#define FM_PLLCTL12_CP_PLL2                  0X7
+
+/* Register Masks */
+#define RM_PLLCTL12_RZ_PLL2 \
+	 RM(FM_PLLCTL12_RZ_PLL2, FB_PLLCTL12_RZ_PLL2)
+
+#define RM_PLLCTL12_CP_PLL2 \
+	 RM(FM_PLLCTL12_CP_PLL2, FB_PLLCTL12_CP_PLL2)
+
+
+/*******************************
+ *      R_PLLCTL1B (0x60)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL1B_VCOI_PLL2                4
+#define FB_PLLCTL1B_VCOI_PLL1                2
+
+/* Field Masks */
+#define FM_PLLCTL1B_VCOI_PLL2                0X3
+#define FM_PLLCTL1B_VCOI_PLL1                0X3
+
+/* Register Masks */
+#define RM_PLLCTL1B_VCOI_PLL2 \
+	 RM(FM_PLLCTL1B_VCOI_PLL2, FB_PLLCTL1B_VCOI_PLL2)
+
+#define RM_PLLCTL1B_VCOI_PLL1 \
+	 RM(FM_PLLCTL1B_VCOI_PLL1, FB_PLLCTL1B_VCOI_PLL1)
+
+
+/*******************************
+ *      R_PLLCTL1C (0x61)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL1C_PDB_PLL2                 2
+#define FB_PLLCTL1C_PDB_PLL1                 1
+
+/* Field Masks */
+#define FM_PLLCTL1C_PDB_PLL2                 0X1
+#define FM_PLLCTL1C_PDB_PLL1                 0X1
+
+/* Field Values */
+#define FV_PLLCTL1C_PDB_PLL2_ENABLE          0x1
+#define FV_PLLCTL1C_PDB_PLL2_DISABLE         0x0
+#define FV_PLLCTL1C_PDB_PLL1_ENABLE          0x1
+#define FV_PLLCTL1C_PDB_PLL1_DISABLE         0x0
+
+/* Register Masks */
+#define RM_PLLCTL1C_PDB_PLL2 \
+	 RM(FM_PLLCTL1C_PDB_PLL2, FB_PLLCTL1C_PDB_PLL2)
+
+#define RM_PLLCTL1C_PDB_PLL1 \
+	 RM(FM_PLLCTL1C_PDB_PLL1, FB_PLLCTL1C_PDB_PLL1)
+
+
+/* Register Values */
+#define RV_PLLCTL1C_PDB_PLL2_ENABLE \
+	 RV(FV_PLLCTL1C_PDB_PLL2_ENABLE, FB_PLLCTL1C_PDB_PLL2)
+
+#define RV_PLLCTL1C_PDB_PLL2_DISABLE \
+	 RV(FV_PLLCTL1C_PDB_PLL2_DISABLE, FB_PLLCTL1C_PDB_PLL2)
+
+#define RV_PLLCTL1C_PDB_PLL1_ENABLE \
+	 RV(FV_PLLCTL1C_PDB_PLL1_ENABLE, FB_PLLCTL1C_PDB_PLL1)
+
+#define RV_PLLCTL1C_PDB_PLL1_DISABLE \
+	 RV(FV_PLLCTL1C_PDB_PLL1_DISABLE, FB_PLLCTL1C_PDB_PLL1)
+
+
+/*******************************
+ *      R_TIMEBASE (0x77)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_TIMEBASE_DIVIDER                  0
+
+/* Field Masks */
+#define FM_TIMEBASE_DIVIDER                  0XFF
+
+/* Register Masks */
+#define RM_TIMEBASE_DIVIDER \
+	 RM(FM_TIMEBASE_DIVIDER, FB_TIMEBASE_DIVIDER)
+
+
+/*****************************
+ *      R_DEVIDL (0x7D)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_DEVIDL_DIDL                       0
+
+/* Field Masks */
+#define FM_DEVIDL_DIDL                       0XFF
+
+/* Register Masks */
+#define RM_DEVIDL_DIDL                       RM(FM_DEVIDL_DIDL, FB_DEVIDL_DIDL)
+
+/*****************************
+ *      R_DEVIDH (0x7E)      *
+ *****************************/
+
+/* Field Offsets */
+#define FB_DEVIDH_DIDH                       0
+
+/* Field Masks */
+#define FM_DEVIDH_DIDH                       0XFF
+
+/* Register Masks */
+#define RM_DEVIDH_DIDH                       RM(FM_DEVIDH_DIDH, FB_DEVIDH_DIDH)
+
+/****************************
+ *      R_RESET (0x80)      *
+ ****************************/
+
+/* Field Offsets */
+#define FB_RESET                             0
+
+/* Field Masks */
+#define FM_RESET                             0XFF
+
+/* Field Values */
+#define FV_RESET_ENABLE                      0x85
+
+/* Register Masks */
+#define RM_RESET                             RM(FM_RESET, FB_RESET)
+
+/* Register Values */
+#define RV_RESET_ENABLE                      RV(FV_RESET_ENABLE, FB_RESET)
+
+/********************************
+ *      R_DACCRSTAT (0x8A)      *
+ ********************************/
+
+/* Field Offsets */
+#define FB_DACCRSTAT_DACCR_BUSY              7
+
+/* Field Masks */
+#define FM_DACCRSTAT_DACCR_BUSY              0X1
+
+/* Register Masks */
+#define RM_DACCRSTAT_DACCR_BUSY \
+	 RM(FM_DACCRSTAT_DACCR_BUSY, FB_DACCRSTAT_DACCR_BUSY)
+
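+/*
+ * The DACCR registers above (R_DACCRADDR, R_DACCRWRL/M/H, R_DACCRRDL/M/H and
+ * the busy flag in R_DACCRSTAT) form an indirect interface to the DAC
+ * coefficient memory. One plausible write sequence, inferred from the
+ * register names only and offered as a sketch (the address macros and the
+ * regmap handle are assumed from elsewhere in the driver):
+ *
+ *	unsigned int stat;
+ *
+ *	do {	/- wait for any previous transaction to finish -/
+ *		regmap_read(regmap, R_DACCRSTAT, &stat);
+ *	} while (stat & RM_DACCRSTAT_DACCR_BUSY);
+ *
+ *	regmap_write(regmap, R_DACCRADDR, addr);
+ *	regmap_write(regmap, R_DACCRWRL, coeff & 0xff);
+ *	regmap_write(regmap, R_DACCRWRM, (coeff >> 8) & 0xff);
+ *	regmap_write(regmap, R_DACCRWRH, (coeff >> 16) & 0xff);
+ */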
+
+/******************************
+ *      R_PLLCTL0 (0x8E)      *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL0_PLL2_LOCK                 1
+#define FB_PLLCTL0_PLL1_LOCK                 0
+
+/* Field Masks */
+#define FM_PLLCTL0_PLL2_LOCK                 0X1
+#define FM_PLLCTL0_PLL1_LOCK                 0X1
+
+/* Register Masks */
+#define RM_PLLCTL0_PLL2_LOCK \
+	 RM(FM_PLLCTL0_PLL2_LOCK, FB_PLLCTL0_PLL2_LOCK)
+
+#define RM_PLLCTL0_PLL1_LOCK \
+	 RM(FM_PLLCTL0_PLL1_LOCK, FB_PLLCTL0_PLL1_LOCK)
+
+
+/********************************
+ *      R_PLLREFSEL (0x8F)      *
+ ********************************/
+
+/* Field Offsets */
+#define FB_PLLREFSEL_PLL2_REF_SEL            4
+#define FB_PLLREFSEL_PLL1_REF_SEL            0
+
+/* Field Masks */
+#define FM_PLLREFSEL_PLL2_REF_SEL            0X7
+#define FM_PLLREFSEL_PLL1_REF_SEL            0X7
+
+/* Field Values */
+#define FV_PLLREFSEL_PLL2_REF_SEL_XTAL_MCLK1 0x0
+#define FV_PLLREFSEL_PLL2_REF_SEL_MCLK2      0x1
+#define FV_PLLREFSEL_PLL1_REF_SEL_XTAL_MCLK1 0x0
+#define FV_PLLREFSEL_PLL1_REF_SEL_MCLK2      0x1
+
+/* Register Masks */
+#define RM_PLLREFSEL_PLL2_REF_SEL \
+	 RM(FM_PLLREFSEL_PLL2_REF_SEL, FB_PLLREFSEL_PLL2_REF_SEL)
+
+#define RM_PLLREFSEL_PLL1_REF_SEL \
+	 RM(FM_PLLREFSEL_PLL1_REF_SEL, FB_PLLREFSEL_PLL1_REF_SEL)
+
+
+/* Register Values */
+#define RV_PLLREFSEL_PLL2_REF_SEL_XTAL_MCLK1 \
+	 RV(FV_PLLREFSEL_PLL2_REF_SEL_XTAL_MCLK1, FB_PLLREFSEL_PLL2_REF_SEL)
+
+#define RV_PLLREFSEL_PLL2_REF_SEL_MCLK2 \
+	 RV(FV_PLLREFSEL_PLL2_REF_SEL_MCLK2, FB_PLLREFSEL_PLL2_REF_SEL)
+
+#define RV_PLLREFSEL_PLL1_REF_SEL_XTAL_MCLK1 \
+	 RV(FV_PLLREFSEL_PLL1_REF_SEL_XTAL_MCLK1, FB_PLLREFSEL_PLL1_REF_SEL)
+
+#define RV_PLLREFSEL_PLL1_REF_SEL_MCLK2 \
+	 RV(FV_PLLREFSEL_PLL1_REF_SEL_MCLK2, FB_PLLREFSEL_PLL1_REF_SEL)
+
+
+/*******************************
+ *      R_DACMBCEN (0xC7)      *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACMBCEN_MBCEN3                   2
+#define FB_DACMBCEN_MBCEN2                   1
+#define FB_DACMBCEN_MBCEN1                   0
+
+/* Field Masks */
+#define FM_DACMBCEN_MBCEN3                   0X1
+#define FM_DACMBCEN_MBCEN2                   0X1
+#define FM_DACMBCEN_MBCEN1                   0X1
+
+/* Register Masks */
+#define RM_DACMBCEN_MBCEN3 \
+	 RM(FM_DACMBCEN_MBCEN3, FB_DACMBCEN_MBCEN3)
+
+#define RM_DACMBCEN_MBCEN2 \
+	 RM(FM_DACMBCEN_MBCEN2, FB_DACMBCEN_MBCEN2)
+
+#define RM_DACMBCEN_MBCEN1 \
+	 RM(FM_DACMBCEN_MBCEN1, FB_DACMBCEN_MBCEN1)
+
+
+/********************************
+ *      R_DACMBCCTL (0xC8)      *
+ ********************************/
+
+/* Field Offsets */
+#define FB_DACMBCCTL_LVLMODE3                5
+#define FB_DACMBCCTL_WINSEL3                 4
+#define FB_DACMBCCTL_LVLMODE2                3
+#define FB_DACMBCCTL_WINSEL2                 2
+#define FB_DACMBCCTL_LVLMODE1                1
+#define FB_DACMBCCTL_WINSEL1                 0
+
+/* Field Masks */
+#define FM_DACMBCCTL_LVLMODE3                0X1
+#define FM_DACMBCCTL_WINSEL3                 0X1
+#define FM_DACMBCCTL_LVLMODE2                0X1
+#define FM_DACMBCCTL_WINSEL2                 0X1
+#define FM_DACMBCCTL_LVLMODE1                0X1
+#define FM_DACMBCCTL_WINSEL1                 0X1
+
+/* Register Masks */
+#define RM_DACMBCCTL_LVLMODE3 \
+	 RM(FM_DACMBCCTL_LVLMODE3, FB_DACMBCCTL_LVLMODE3)
+
+#define RM_DACMBCCTL_WINSEL3 \
+	 RM(FM_DACMBCCTL_WINSEL3, FB_DACMBCCTL_WINSEL3)
+
+#define RM_DACMBCCTL_LVLMODE2 \
+	 RM(FM_DACMBCCTL_LVLMODE2, FB_DACMBCCTL_LVLMODE2)
+
+#define RM_DACMBCCTL_WINSEL2 \
+	 RM(FM_DACMBCCTL_WINSEL2, FB_DACMBCCTL_WINSEL2)
+
+#define RM_DACMBCCTL_LVLMODE1 \
+	 RM(FM_DACMBCCTL_LVLMODE1, FB_DACMBCCTL_LVLMODE1)
+
+#define RM_DACMBCCTL_WINSEL1 \
+	 RM(FM_DACMBCCTL_WINSEL1, FB_DACMBCCTL_WINSEL1)
+
+
+/*********************************
+ *      R_DACMBCMUG1 (0xC9)      *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCMUG1_PHASE                  5
+#define FB_DACMBCMUG1_MUGAIN                 0
+
+/* Field Masks */
+#define FM_DACMBCMUG1_PHASE                  0X1
+#define FM_DACMBCMUG1_MUGAIN                 0X1F
+
+/* Register Masks */
+#define RM_DACMBCMUG1_PHASE \
+	 RM(FM_DACMBCMUG1_PHASE, FB_DACMBCMUG1_PHASE)
+
+#define RM_DACMBCMUG1_MUGAIN \
+	 RM(FM_DACMBCMUG1_MUGAIN, FB_DACMBCMUG1_MUGAIN)
+
+
+/*********************************
+ *      R_DACMBCTHR1 (0xCA)      *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCTHR1_THRESH                 0
+
+/* Field Masks */
+#define FM_DACMBCTHR1_THRESH                 0XFF
+
+/* Register Masks */
+#define RM_DACMBCTHR1_THRESH \
+	 RM(FM_DACMBCTHR1_THRESH, FB_DACMBCTHR1_THRESH)
+
+
+/*********************************
+ *      R_DACMBCRAT1 (0xCB)      *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCRAT1_RATIO                  0
+
+/* Field Masks */
+#define FM_DACMBCRAT1_RATIO                  0X1F
+
+/* Register Masks */
+#define RM_DACMBCRAT1_RATIO \
+	 RM(FM_DACMBCRAT1_RATIO, FB_DACMBCRAT1_RATIO)
+
+
+/**********************************
+ *      R_DACMBCATK1L (0xCC)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK1L_TCATKL                0
+
+/* Field Masks */
+#define FM_DACMBCATK1L_TCATKL                0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK1L_TCATKL \
+	 RM(FM_DACMBCATK1L_TCATKL, FB_DACMBCATK1L_TCATKL)
+
+
+/**********************************
+ *      R_DACMBCATK1H (0xCD)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK1H_TCATKH                0
+
+/* Field Masks */
+#define FM_DACMBCATK1H_TCATKH                0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK1H_TCATKH \
+	 RM(FM_DACMBCATK1H_TCATKH, FB_DACMBCATK1H_TCATKH)
+
+
+/**********************************
+ *      R_DACMBCREL1L (0xCE)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL1L_TCRELL                0
+
+/* Field Masks */
+#define FM_DACMBCREL1L_TCRELL                0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL1L_TCRELL \
+	 RM(FM_DACMBCREL1L_TCRELL, FB_DACMBCREL1L_TCRELL)
+
+
+/**********************************
+ *      R_DACMBCREL1H (0xCF)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL1H_TCRELH                0
+
+/* Field Masks */
+#define FM_DACMBCREL1H_TCRELH                0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL1H_TCRELH \
+	 RM(FM_DACMBCREL1H_TCRELH, FB_DACMBCREL1H_TCRELH)
+
+
+/*********************************
+ *      R_DACMBCMUG2 (0xD0)      *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCMUG2_PHASE                  5
+#define FB_DACMBCMUG2_MUGAIN                 0
+
+/* Field Masks */
+#define FM_DACMBCMUG2_PHASE                  0X1
+#define FM_DACMBCMUG2_MUGAIN                 0X1F
+
+/* Register Masks */
+#define RM_DACMBCMUG2_PHASE \
+	 RM(FM_DACMBCMUG2_PHASE, FB_DACMBCMUG2_PHASE)
+
+#define RM_DACMBCMUG2_MUGAIN \
+	 RM(FM_DACMBCMUG2_MUGAIN, FB_DACMBCMUG2_MUGAIN)
+
+
+/*********************************
+ *      R_DACMBCTHR2 (0xD1)      *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCTHR2_THRESH                 0
+
+/* Field Masks */
+#define FM_DACMBCTHR2_THRESH                 0XFF
+
+/* Register Masks */
+#define RM_DACMBCTHR2_THRESH \
+	 RM(FM_DACMBCTHR2_THRESH, FB_DACMBCTHR2_THRESH)
+
+
+/*********************************
+ *      R_DACMBCRAT2 (0xD2)      *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCRAT2_RATIO                  0
+
+/* Field Masks */
+#define FM_DACMBCRAT2_RATIO                  0X1F
+
+/* Register Masks */
+#define RM_DACMBCRAT2_RATIO \
+	 RM(FM_DACMBCRAT2_RATIO, FB_DACMBCRAT2_RATIO)
+
+
+/**********************************
+ *      R_DACMBCATK2L (0xD3)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK2L_TCATKL                0
+
+/* Field Masks */
+#define FM_DACMBCATK2L_TCATKL                0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK2L_TCATKL \
+	 RM(FM_DACMBCATK2L_TCATKL, FB_DACMBCATK2L_TCATKL)
+
+
+/**********************************
+ *      R_DACMBCATK2H (0xD4)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK2H_TCATKH                0
+
+/* Field Masks */
+#define FM_DACMBCATK2H_TCATKH                0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK2H_TCATKH \
+	 RM(FM_DACMBCATK2H_TCATKH, FB_DACMBCATK2H_TCATKH)
+
+
+/**********************************
+ *      R_DACMBCREL2L (0xD5)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL2L_TCRELL                0
+
+/* Field Masks */
+#define FM_DACMBCREL2L_TCRELL                0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL2L_TCRELL \
+	 RM(FM_DACMBCREL2L_TCRELL, FB_DACMBCREL2L_TCRELL)
+
+
+/**********************************
+ *      R_DACMBCREL2H (0xD6)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL2H_TCRELH                0
+
+/* Field Masks */
+#define FM_DACMBCREL2H_TCRELH                0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL2H_TCRELH \
+	 RM(FM_DACMBCREL2H_TCRELH, FB_DACMBCREL2H_TCRELH)
+
+
+/*********************************
+ *      R_DACMBCMUG3 (0xD7)      *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCMUG3_PHASE                  5
+#define FB_DACMBCMUG3_MUGAIN                 0
+
+/* Field Masks */
+#define FM_DACMBCMUG3_PHASE                  0X1
+#define FM_DACMBCMUG3_MUGAIN                 0X1F
+
+/* Register Masks */
+#define RM_DACMBCMUG3_PHASE \
+	 RM(FM_DACMBCMUG3_PHASE, FB_DACMBCMUG3_PHASE)
+
+#define RM_DACMBCMUG3_MUGAIN \
+	 RM(FM_DACMBCMUG3_MUGAIN, FB_DACMBCMUG3_MUGAIN)
+
+
+/*********************************
+ *      R_DACMBCTHR3 (0xD8)      *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCTHR3_THRESH                 0
+
+/* Field Masks */
+#define FM_DACMBCTHR3_THRESH                 0XFF
+
+/* Register Masks */
+#define RM_DACMBCTHR3_THRESH \
+	 RM(FM_DACMBCTHR3_THRESH, FB_DACMBCTHR3_THRESH)
+
+
+/*********************************
+ *      R_DACMBCRAT3 (0xD9)      *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCRAT3_RATIO                  0
+
+/* Field Masks */
+#define FM_DACMBCRAT3_RATIO                  0X1F
+
+/* Register Masks */
+#define RM_DACMBCRAT3_RATIO \
+	 RM(FM_DACMBCRAT3_RATIO, FB_DACMBCRAT3_RATIO)
+
+
+/**********************************
+ *      R_DACMBCATK3L (0xDA)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK3L_TCATKL                0
+
+/* Field Masks */
+#define FM_DACMBCATK3L_TCATKL                0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK3L_TCATKL \
+	 RM(FM_DACMBCATK3L_TCATKL, FB_DACMBCATK3L_TCATKL)
+
+
+/**********************************
+ *      R_DACMBCATK3H (0xDB)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK3H_TCATKH                0
+
+/* Field Masks */
+#define FM_DACMBCATK3H_TCATKH                0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK3H_TCATKH \
+	 RM(FM_DACMBCATK3H_TCATKH, FB_DACMBCATK3H_TCATKH)
+
+
+/**********************************
+ *      R_DACMBCREL3L (0xDC)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL3L_TCRELL                0
+
+/* Field Masks */
+#define FM_DACMBCREL3L_TCRELL                0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL3L_TCRELL \
+	 RM(FM_DACMBCREL3L_TCRELL, FB_DACMBCREL3L_TCRELL)
+
+
+/**********************************
+ *      R_DACMBCREL3H (0xDD)      *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL3H_TCRELH                0
+
+/* Field Masks */
+#define FM_DACMBCREL3H_TCRELH                0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL3H_TCRELH \
+	 RM(FM_DACMBCREL3H_TCRELH, FB_DACMBCREL3H_TCRELH)
+
+
+#endif /* __WOOKIE_H__ */
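The FB_*/FM_*/FV_* triplets above follow the usual offset/mask/value convention, and the RM()/RV() helpers (presumably defined earlier in this header, outside this excerpt) compose them into register-wide masks and values. A hedged sketch of the assumed helper semantics and a typical read-modify-write use:

/*
 * Assumed helper semantics (not visible in this excerpt): shift a field
 * mask or field value up to its bit offset within the register.
 */
#define RM(field_mask, field_offset)	((field_mask) << (field_offset))
#define RV(field_value, field_offset)	((field_value) << (field_offset))

/* Example: enable compressor band 1 in a cached R_DACMBCEN value */
static inline unsigned int dacmbcen_enable_band1(unsigned int reg)
{
	reg &= ~RM(FM_DACMBCEN_MBCEN1, FB_DACMBCEN_MBCEN1);	/* clear field */
	reg |= RV(0x1, FB_DACMBCEN_MBCEN1);			/* set it to 1 */
	return reg;
}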
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index cfe72b9d4356069327e80d592ff1e660bca39258..8798182959c1d9338bde08d38d38b05089f83266 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -240,7 +240,6 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
 				     sizeof(struct twl4030_codec_data),
 				     GFP_KERNEL);
 		if (!pdata) {
-			dev_err(codec->dev, "Can not allocate memory\n");
 			of_node_put(twl4030_codec_node);
 			return NULL;
 		}
@@ -851,14 +850,14 @@ static int snd_soc_get_volsw_twl4030(struct snd_kcontrol *kcontrol,
 	int mask = (1 << fls(max)) - 1;
 
 	ucontrol->value.integer.value[0] =
-		(snd_soc_read(codec, reg) >> shift) & mask;
+		(twl4030_read(codec, reg) >> shift) & mask;
 	if (ucontrol->value.integer.value[0])
 		ucontrol->value.integer.value[0] =
 			max + 1 - ucontrol->value.integer.value[0];
 
 	if (shift != rshift) {
 		ucontrol->value.integer.value[1] =
-			(snd_soc_read(codec, reg) >> rshift) & mask;
+			(twl4030_read(codec, reg) >> rshift) & mask;
 		if (ucontrol->value.integer.value[1])
 			ucontrol->value.integer.value[1] =
 				max + 1 - ucontrol->value.integer.value[1];
@@ -909,9 +908,9 @@ static int snd_soc_get_volsw_r2_twl4030(struct snd_kcontrol *kcontrol,
 	int mask = (1<<fls(max))-1;
 
 	ucontrol->value.integer.value[0] =
-		(snd_soc_read(codec, reg) >> shift) & mask;
+		(twl4030_read(codec, reg) >> shift) & mask;
 	ucontrol->value.integer.value[1] =
-		(snd_soc_read(codec, reg2) >> shift) & mask;
+		(twl4030_read(codec, reg2) >> shift) & mask;
 
 	if (ucontrol->value.integer.value[0])
 		ucontrol->value.integer.value[0] =
@@ -2196,8 +2195,6 @@ static int twl4030_soc_remove(struct snd_soc_codec *codec)
 static const struct snd_soc_codec_driver soc_codec_dev_twl4030 = {
 	.probe = twl4030_soc_probe,
 	.remove = twl4030_soc_remove,
-	.read = twl4030_read,
-	.write = twl4030_write,
 	.set_bias_level = twl4030_set_bias_level,
 	.idle_bias_off = true,
 
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index 1773ff84ee3b23a659341efed47aa0d41770b78b..3b895b4b451c1d319d61b6ca9b0d777ad6d2a49b 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -106,10 +106,12 @@ static const struct snd_pcm_hw_constraint_list sysclk_constraints[] = {
 	{ .count = ARRAY_SIZE(hp_rates), .list = hp_rates, },
 };
 
+#define to_twl6040(codec)	dev_get_drvdata((codec)->dev->parent)
+
 static unsigned int twl6040_read(struct snd_soc_codec *codec, unsigned int reg)
 {
 	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
-	struct twl6040 *twl6040 = codec->control_data;
+	struct twl6040 *twl6040 = to_twl6040(codec);
 	u8 value;
 
 	if (reg >= TWL6040_CACHEREGNUM)
@@ -171,7 +173,7 @@ static inline void twl6040_update_dl12_cache(struct snd_soc_codec *codec,
 static int twl6040_write(struct snd_soc_codec *codec,
 			unsigned int reg, unsigned int value)
 {
-	struct twl6040 *twl6040 = codec->control_data;
+	struct twl6040 *twl6040 = to_twl6040(codec);
 
 	if (reg >= TWL6040_CACHEREGNUM)
 		return -EIO;
@@ -541,7 +543,7 @@ int twl6040_get_dl1_gain(struct snd_soc_codec *codec)
 	if (snd_soc_dapm_get_pin_status(dapm, "HSOR") ||
 		snd_soc_dapm_get_pin_status(dapm, "HSOL")) {
 
-		u8 val = snd_soc_read(codec, TWL6040_REG_HSLCTL);
+		u8 val = twl6040_read(codec, TWL6040_REG_HSLCTL);
 		if (val & TWL6040_HSDACMODE)
 			/* HSDACL in LP mode */
 			return -8; /* -8dB */
@@ -572,7 +574,7 @@ EXPORT_SYMBOL_GPL(twl6040_get_trim_value);
 
 int twl6040_get_hs_step_size(struct snd_soc_codec *codec)
 {
-	struct twl6040 *twl6040 = codec->control_data;
+	struct twl6040 *twl6040 = to_twl6040(codec);
 
 	if (twl6040_get_revid(twl6040) < TWL6040_REV_ES1_3)
 		/* For ES under ES_1.3 HS step is 2 mV */
@@ -830,7 +832,7 @@ static const struct snd_soc_dapm_route intercon[] = {
 static int twl6040_set_bias_level(struct snd_soc_codec *codec,
 				enum snd_soc_bias_level level)
 {
-	struct twl6040 *twl6040 = codec->control_data;
+	struct twl6040 *twl6040 = to_twl6040(codec);
 	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
 	int ret = 0;
 
@@ -922,7 +924,7 @@ static int twl6040_prepare(struct snd_pcm_substream *substream,
 			struct snd_soc_dai *dai)
 {
 	struct snd_soc_codec *codec = dai->codec;
-	struct twl6040 *twl6040 = codec->control_data;
+	struct twl6040 *twl6040 = to_twl6040(codec);
 	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
 	int ret;
 
@@ -964,7 +966,7 @@ static int twl6040_set_dai_sysclk(struct snd_soc_dai *codec_dai,
 static void twl6040_mute_path(struct snd_soc_codec *codec, enum twl6040_dai_id id,
 			     int mute)
 {
-	struct twl6040 *twl6040 = codec->control_data;
+	struct twl6040 *twl6040 = to_twl6040(codec);
 	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
 	int hslctl, hsrctl, earctl;
 	int hflctl, hfrctl;
@@ -1108,7 +1110,6 @@ static struct snd_soc_dai_driver twl6040_dai[] = {
 static int twl6040_probe(struct snd_soc_codec *codec)
 {
 	struct twl6040_data *priv;
-	struct twl6040 *twl6040 = dev_get_drvdata(codec->dev->parent);
 	struct platform_device *pdev = to_platform_device(codec->dev);
 	int ret = 0;
 
@@ -1119,7 +1120,6 @@ static int twl6040_probe(struct snd_soc_codec *codec)
 	snd_soc_codec_set_drvdata(codec, priv);
 
 	priv->codec = codec;
-	codec->control_data = twl6040;
 
 	priv->plug_irq = platform_get_irq(pdev, 0);
 	if (priv->plug_irq < 0) {
@@ -1158,8 +1158,6 @@ static int twl6040_remove(struct snd_soc_codec *codec)
 static const struct snd_soc_codec_driver soc_codec_dev_twl6040 = {
 	.probe = twl6040_probe,
 	.remove = twl6040_remove,
-	.read = twl6040_read,
-	.write = twl6040_write,
 	.set_bias_level = twl6040_set_bias_level,
 	.suspend_bias_off = true,
 	.ignore_pmdown_time = true,
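The new to_twl6040() macro leans on the MFD convention that the parent device's driver data holds the shared chip state, which removes the need to stash it in codec->control_data. A generic, hedged sketch of that parent/child hand-off (function and variable names are illustrative):

#include <linux/device.h>

/* The MFD parent publishes its shared state on its own struct device ... */
static void parent_publish_state(struct device *parent, void *shared)
{
	dev_set_drvdata(parent, shared);
}

/* ... and any child device (such as this codec) can look it up again,
 * which is exactly what to_twl6040() wraps for the twl6040 codec. */
static void *child_lookup_parent_state(struct device *child)
{
	return dev_get_drvdata(child->parent);
}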
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index 926c81ae81853477241c1deecb5400a226f1665a..46a495b4da8d828e614f47e5eb3fc041eb6ab4dc 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -37,7 +37,8 @@ struct uda1380_priv {
 	struct snd_soc_codec *codec;
 	unsigned int dac_clk;
 	struct work_struct work;
-	void *control_data;
+	struct i2c_client *i2c;
+	u16 *reg_cache;
 };
 
 /*
@@ -63,7 +64,9 @@ static unsigned long uda1380_cache_dirty;
 static inline unsigned int uda1380_read_reg_cache(struct snd_soc_codec *codec,
 	unsigned int reg)
 {
-	u16 *cache = codec->reg_cache;
+	struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
+	u16 *cache = uda1380->reg_cache;
+
 	if (reg == UDA1380_RESET)
 		return 0;
 	if (reg >= UDA1380_CACHEREGNUM)
@@ -77,7 +80,8 @@ static inline unsigned int uda1380_read_reg_cache(struct snd_soc_codec *codec,
 static inline void uda1380_write_reg_cache(struct snd_soc_codec *codec,
 	u16 reg, unsigned int value)
 {
-	u16 *cache = codec->reg_cache;
+	struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
+	u16 *cache = uda1380->reg_cache;
 
 	if (reg >= UDA1380_CACHEREGNUM)
 		return;
@@ -92,6 +96,7 @@ static inline void uda1380_write_reg_cache(struct snd_soc_codec *codec,
 static int uda1380_write(struct snd_soc_codec *codec, unsigned int reg,
 	unsigned int value)
 {
+	struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
 	u8 data[3];
 
 	/* data is
@@ -111,10 +116,10 @@ static int uda1380_write(struct snd_soc_codec *codec, unsigned int reg,
 	if (!snd_soc_codec_is_active(codec) && (reg >= UDA1380_MVOL))
 		return 0;
 	pr_debug("uda1380: hw write %x val %x\n", reg, value);
-	if (codec->hw_write(codec->control_data, data, 3) == 3) {
+	if (i2c_master_send(uda1380->i2c, data, 3) == 3) {
 		unsigned int val;
-		i2c_master_send(codec->control_data, data, 1);
-		i2c_master_recv(codec->control_data, data, 2);
+		i2c_master_send(uda1380->i2c, data, 1);
+		i2c_master_recv(uda1380->i2c, data, 2);
 		val = (data[0]<<8) | data[1];
 		if (val != value) {
 			pr_debug("uda1380: READ BACK VAL %x\n",
@@ -130,16 +135,17 @@ static int uda1380_write(struct snd_soc_codec *codec, unsigned int reg,
 
 static void uda1380_sync_cache(struct snd_soc_codec *codec)
 {
+	struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
 	int reg;
 	u8 data[3];
-	u16 *cache = codec->reg_cache;
+	u16 *cache = uda1380->reg_cache;
 
 	/* Sync reg_cache with the hardware */
 	for (reg = 0; reg < UDA1380_MVOL; reg++) {
 		data[0] = reg;
 		data[1] = (cache[reg] & 0xff00) >> 8;
 		data[2] = cache[reg] & 0x00ff;
-		if (codec->hw_write(codec->control_data, data, 3) != 3)
+		if (i2c_master_send(uda1380->i2c, data, 3) != 3)
 			dev_err(codec->dev, "%s: write to reg 0x%x failed\n",
 				__func__, reg);
 	}
@@ -148,6 +154,7 @@ static void uda1380_sync_cache(struct snd_soc_codec *codec)
 static int uda1380_reset(struct snd_soc_codec *codec)
 {
 	struct uda1380_platform_data *pdata = codec->dev->platform_data;
+	struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
 
 	if (gpio_is_valid(pdata->gpio_reset)) {
 		gpio_set_value(pdata->gpio_reset, 1);
@@ -160,7 +167,7 @@ static int uda1380_reset(struct snd_soc_codec *codec)
 		data[1] = 0;
 		data[2] = 0;
 
-		if (codec->hw_write(codec->control_data, data, 3) != 3) {
+		if (i2c_master_send(uda1380->i2c, data, 3) != 3) {
 			dev_err(codec->dev, "%s: failed\n", __func__);
 			return -EIO;
 		}
@@ -695,9 +702,6 @@ static int uda1380_probe(struct snd_soc_codec *codec)
 
 	uda1380->codec = codec;
 
-	codec->hw_write = (hw_write_t)i2c_master_send;
-	codec->control_data = uda1380->control_data;
-
 	if (!gpio_is_valid(pdata->gpio_power)) {
 		ret = uda1380_reset(codec);
 		if (ret)
@@ -722,16 +726,9 @@ static int uda1380_probe(struct snd_soc_codec *codec)
 
 static const struct snd_soc_codec_driver soc_codec_dev_uda1380 = {
 	.probe =	uda1380_probe,
-	.read =		uda1380_read_reg_cache,
-	.write =	uda1380_write,
 	.set_bias_level = uda1380_set_bias_level,
 	.suspend_bias_off = true,
 
-	.reg_cache_size = ARRAY_SIZE(uda1380_reg),
-	.reg_word_size = sizeof(u16),
-	.reg_cache_default = uda1380_reg,
-	.reg_cache_step = 1,
-
 	.component_driver = {
 		.controls		= uda1380_snd_controls,
 		.num_controls		= ARRAY_SIZE(uda1380_snd_controls),
@@ -771,8 +768,15 @@ static int uda1380_i2c_probe(struct i2c_client *i2c,
 			return ret;
 	}
 
+	uda1380->reg_cache = devm_kmemdup(&i2c->dev,
+					uda1380_reg,
+					ARRAY_SIZE(uda1380_reg) * sizeof(u16),
+					GFP_KERNEL);
+	if (!uda1380->reg_cache)
+		return -ENOMEM;
+
 	i2c_set_clientdata(i2c, uda1380);
-	uda1380->control_data = i2c;
+	uda1380->i2c = i2c;
 
 	ret =  snd_soc_register_codec(&i2c->dev,
 			&soc_codec_dev_uda1380, uda1380_dai, ARRAY_SIZE(uda1380_dai));
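With the ASoC-level reg_cache fields removed from the codec driver struct, the uda1380 driver now owns its cache: devm_kmemdup() copies the read-only default table into writable, device-managed memory at I2C probe time, and the I2C client is kept in the private struct for i2c_master_send()/recv(). A hedged sketch of the cache-duplication step in isolation (names and values are illustrative):

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative read-only default register table */
static const u16 example_reg_defaults[] = { 0x0502, 0x0000, 0x0000, 0x3f3f };

/* Duplicate the defaults into a writable, device-managed cache at probe */
static u16 *example_alloc_reg_cache(struct device *dev)
{
	return devm_kmemdup(dev, example_reg_defaults,
			    sizeof(example_reg_defaults), GFP_KERNEL);
}

For a true array, sizeof(example_reg_defaults) is equivalent to the ARRAY_SIZE() * sizeof(u16) form used in the hunk above.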
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index 4f5f5710b56941ddbee4e827bab1d16b79734a5e..0147d2fb7b0a28b389069616bfe8cdb2dc21f4e2 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -655,11 +655,8 @@ static int wm0010_boot(struct snd_soc_codec *codec)
 		ret = -ENOMEM;
 		len = pll_rec.length + 8;
 		out = kzalloc(len, GFP_KERNEL | GFP_DMA);
-		if (!out) {
-			dev_err(codec->dev,
-				"Failed to allocate RX buffer\n");
+		if (!out)
 			goto abort;
-		}
 
 		img_swap = kzalloc(len, GFP_KERNEL | GFP_DMA);
 		if (!img_swap)
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index 23cde3a0dc112427cdc5eec64619eb32b43a4595..abfa052c07d83502800cbd84f8d0303ea18a9af8 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -13,7 +13,7 @@
  * 'wm2000_anc.bin' by default (overridable via platform data) at
  * runtime and is expected to be in flat binary format.  This is
  * generated by Wolfson configuration tools and includes
- * system-specific callibration information.  If supplied as a
+ * system-specific calibration information.  If supplied as a
  * sequence of ASCII-encoded hexidecimal bytes this can be converted
  * into a flat binary with a command such as this on the command line:
  *
@@ -826,8 +826,7 @@ static int wm2000_i2c_probe(struct i2c_client *i2c,
 	int reg;
 	u16 id;
 
-	wm2000 = devm_kzalloc(&i2c->dev, sizeof(struct wm2000_priv),
-			      GFP_KERNEL);
+	wm2000 = devm_kzalloc(&i2c->dev, sizeof(*wm2000), GFP_KERNEL);
 	if (!wm2000)
 		return -ENOMEM;
 
@@ -902,7 +901,6 @@ static int wm2000_i2c_probe(struct i2c_client *i2c,
 					    wm2000->anc_download_size,
 					    GFP_KERNEL);
 	if (wm2000->anc_download == NULL) {
-		dev_err(&i2c->dev, "Out of memory\n");
 		ret = -ENOMEM;
 		goto err_supplies;
 	}
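Two small cleanups recur in this hunk and in later ones (wm8903 below follows the same pattern): allocation-failure messages are dropped, since the allocator already warns on failure, and sizeof(*ptr) replaces sizeof(struct ...) so the size expression cannot drift from the pointer's type. A minimal sketch of the idiom (names are illustrative):

#include <linux/device.h>
#include <linux/slab.h>

struct example_priv {
	int some_state;
};

/* sizeof(*p) follows the pointer's type, so the allocation size stays
 * correct even if the struct is renamed or the pointer changes type;
 * no extra dev_err() is needed because the allocator logs failures. */
static struct example_priv *example_alloc(struct device *dev)
{
	struct example_priv *p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);

	return p;	/* may be NULL; caller must check */
}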
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index d83dab57a1d1f5171afbc9bf6b9d1c2f9c6a40ba..5c2f5727244d7914c46531276b295650703e6bd3 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -98,6 +98,8 @@ struct wm2200_priv {
 
 	int rev;
 	int sysclk;
+
+	unsigned int symmetric_rates:1;
 };
 
 #define WM2200_DSP_RANGE_BASE (WM2200_MAX_REGISTER + 1)
@@ -1550,7 +1552,7 @@ static const struct snd_soc_dapm_route wm2200_dapm_routes[] = {
 
 static int wm2200_probe(struct snd_soc_codec *codec)
 {
-	struct wm2200_priv *wm2200 = dev_get_drvdata(codec->dev);
+	struct wm2200_priv *wm2200 = snd_soc_codec_get_drvdata(codec);
 	int ret;
 
 	wm2200->codec = codec;
@@ -1758,7 +1760,7 @@ static int wm2200_hw_params(struct snd_pcm_substream *substream,
 	lrclk = bclk_rates[bclk] / params_rate(params);
 	dev_dbg(codec->dev, "Setting %dHz LRCLK\n", bclk_rates[bclk] / lrclk);
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
-	    dai->symmetric_rates)
+	    wm2200->symmetric_rates)
 		snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_7,
 				    WM2200_AIF1RX_BCPF_MASK, lrclk);
 	else
@@ -2059,13 +2061,14 @@ static int wm2200_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
 static int wm2200_dai_probe(struct snd_soc_dai *dai)
 {
 	struct snd_soc_codec *codec = dai->codec;
+	struct wm2200_priv *wm2200 = snd_soc_codec_get_drvdata(codec);
 	unsigned int val = 0;
 	int ret;
 
 	ret = snd_soc_read(codec, WM2200_GPIO_CTRL_1);
 	if (ret >= 0) {
 		if ((ret & WM2200_GP1_FN_MASK) != 0) {
-			dai->symmetric_rates = true;
+			wm2200->symmetric_rates = true;
 			val = WM2200_AIF1TX_LRCLK_SRC;
 		}
 	} else {
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index 4f0481d3c7a7117894c209d217d403e1946d5393..fc066caa191884fa324c8813915edfcfcf5203d5 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1935,8 +1935,11 @@ static int wm5102_codec_probe(struct snd_soc_codec *codec)
 	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
 	struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
 	struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
+	struct arizona *arizona = priv->core.arizona;
 	int ret;
 
+	snd_soc_codec_init_regmap(codec, arizona->regmap);
+
 	ret = wm_adsp2_codec_probe(&priv->core.adsp[0], codec);
 	if (ret)
 		return ret;
@@ -1989,17 +1992,9 @@ static unsigned int wm5102_digital_vu[] = {
 	ARIZONA_DAC_DIGITAL_VOLUME_5R,
 };
 
-static struct regmap *wm5102_get_regmap(struct device *dev)
-{
-	struct wm5102_priv *priv = dev_get_drvdata(dev);
-
-	return priv->core.arizona->regmap;
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_wm5102 = {
 	.probe = wm5102_codec_probe,
 	.remove = wm5102_codec_remove,
-	.get_regmap = wm5102_get_regmap,
 
 	.idle_bias_off = true,
 
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 6ed1e1f9ce516cb4867f51a0edb1e83e5ddf34a7..fb0cf9c61f48e8b4fc5af8aa1a1e9e3a0e580f95 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -2280,9 +2280,11 @@ static int wm5110_codec_probe(struct snd_soc_codec *codec)
 	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
 	struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
 	struct wm5110_priv *priv = snd_soc_codec_get_drvdata(codec);
+	struct arizona *arizona = priv->core.arizona;
 	int i, ret;
 
-	priv->core.arizona->dapm = dapm;
+	arizona->dapm = dapm;
+	snd_soc_codec_init_regmap(codec, arizona->regmap);
 
 	ret = arizona_init_spk(codec);
 	if (ret < 0)
@@ -2344,17 +2346,9 @@ static unsigned int wm5110_digital_vu[] = {
 	ARIZONA_DAC_DIGITAL_VOLUME_6R,
 };
 
-static struct regmap *wm5110_get_regmap(struct device *dev)
-{
-	struct wm5110_priv *priv = dev_get_drvdata(dev);
-
-	return priv->core.arizona->regmap;
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_wm5110 = {
 	.probe = wm5110_codec_probe,
 	.remove = wm5110_codec_remove,
-	.get_regmap = wm5110_get_regmap,
 
 	.idle_bias_off = true,
 
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 2efc5b41ad0fe32f138384e91462af562e627a7d..fc79c6725d06f93f26efcb3eaf56eba70e872b6a 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -1472,6 +1472,8 @@ static  int wm8350_codec_probe(struct snd_soc_codec *codec)
 			    GFP_KERNEL);
 	if (priv == NULL)
 		return -ENOMEM;
+
+	snd_soc_codec_init_regmap(codec, wm8350->regmap);
 	snd_soc_codec_set_drvdata(codec, priv);
 
 	priv->wm8350 = wm8350;
@@ -1580,17 +1582,9 @@ static int  wm8350_codec_remove(struct snd_soc_codec *codec)
 	return 0;
 }
 
-static struct regmap *wm8350_get_regmap(struct device *dev)
-{
-	struct wm8350 *wm8350 = dev_get_platdata(dev);
-
-	return wm8350->regmap;
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_wm8350 = {
 	.probe =	wm8350_codec_probe,
 	.remove =	wm8350_codec_remove,
-	.get_regmap =	wm8350_get_regmap,
 	.set_bias_level = wm8350_set_bias_level,
 	.suspend_bias_off = true,
 
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 6c59fb933bd6a940571fdccd8f3b4af765f7d256..a36adf881bcae8f40df5e7b805ebda6c90f9a7e5 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -1285,6 +1285,7 @@ static int wm8400_codec_probe(struct snd_soc_codec *codec)
 	if (priv == NULL)
 		return -ENOMEM;
 
+	snd_soc_codec_init_regmap(codec, wm8400->regmap);
 	snd_soc_codec_set_drvdata(codec, priv);
 	priv->wm8400 = wm8400;
 
@@ -1325,17 +1326,9 @@ static int  wm8400_codec_remove(struct snd_soc_codec *codec)
 	return 0;
 }
 
-static struct regmap *wm8400_get_regmap(struct device *dev)
-{
-	struct wm8400 *wm8400 = dev_get_platdata(dev);
-
-	return wm8400->regmap;
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_wm8400 = {
 	.probe =	wm8400_codec_probe,
 	.remove =	wm8400_codec_remove,
-	.get_regmap =	wm8400_get_regmap,
 	.set_bias_level = wm8400_set_bias_level,
 	.suspend_bias_off = true,
 
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index 237eeb9a8b97fe726f7535ce46f458852a3626b5..cba90f21161fbd2553f39d6aac9756350015a186 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -1995,8 +1995,7 @@ static int wm8903_i2c_probe(struct i2c_client *i2c,
 	unsigned int val, irq_pol;
 	int ret, i;
 
-	wm8903 = devm_kzalloc(&i2c->dev,  sizeof(struct wm8903_priv),
-			      GFP_KERNEL);
+	wm8903 = devm_kzalloc(&i2c->dev, sizeof(*wm8903), GFP_KERNEL);
 	if (wm8903 == NULL)
 		return -ENOMEM;
 
@@ -2017,13 +2016,10 @@ static int wm8903_i2c_probe(struct i2c_client *i2c,
 	if (pdata) {
 		wm8903->pdata = pdata;
 	} else {
-		wm8903->pdata = devm_kzalloc(&i2c->dev,
-					sizeof(struct wm8903_platform_data),
-					GFP_KERNEL);
-		if (wm8903->pdata == NULL) {
-			dev_err(&i2c->dev, "Failed to allocate pdata\n");
+		wm8903->pdata = devm_kzalloc(&i2c->dev, sizeof(*wm8903->pdata),
+					     GFP_KERNEL);
+		if (!wm8903->pdata)
 			return -ENOMEM;
-		}
 
 		if (i2c->irq) {
 			ret = wm8903_set_pdata_irq_trigger(i2c, wm8903->pdata);
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index f91b49e1ece309dcd6175f967eb016a26451de3f..21ffd64031735c03a68fab6d72ea9bb700e386ef 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -3993,6 +3993,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
 	unsigned int reg;
 	int ret, i;
 
+	snd_soc_codec_init_regmap(codec, control->regmap);
+
 	wm8994->hubs.codec = codec;
 
 	mutex_init(&wm8994->accdet_lock);
@@ -4434,19 +4436,11 @@ static int wm8994_codec_remove(struct snd_soc_codec *codec)
 	return 0;
 }
 
-static struct regmap *wm8994_get_regmap(struct device *dev)
-{
-	struct wm8994 *control = dev_get_drvdata(dev->parent);
-
-	return control->regmap;
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_wm8994 = {
 	.probe =	wm8994_codec_probe,
 	.remove =	wm8994_codec_remove,
 	.suspend =	wm8994_codec_suspend,
 	.resume =	wm8994_codec_resume,
-	.get_regmap =   wm8994_get_regmap,
 	.set_bias_level = wm8994_set_bias_level,
 };
 
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index 77f512767273f4e0813f77bdce632e46b1fd83fb..cac9b3e7e15d3ae982fd2f1ee6a0942cc8cd878d 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -1062,8 +1062,11 @@ static int wm8997_codec_probe(struct snd_soc_codec *codec)
 	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
 	struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
 	struct wm8997_priv *priv = snd_soc_codec_get_drvdata(codec);
+	struct arizona *arizona = priv->core.arizona;
 	int ret;
 
+	snd_soc_codec_init_regmap(codec, arizona->regmap);
+
 	ret = arizona_init_spk(codec);
 	if (ret < 0)
 		return ret;
@@ -1095,17 +1098,9 @@ static unsigned int wm8997_digital_vu[] = {
 	ARIZONA_DAC_DIGITAL_VOLUME_5R,
 };
 
-static struct regmap *wm8997_get_regmap(struct device *dev)
-{
-	struct wm8997_priv *priv = dev_get_drvdata(dev);
-
-	return priv->core.arizona->regmap;
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_wm8997 = {
 	.probe = wm8997_codec_probe,
 	.remove = wm8997_codec_remove,
-	.get_regmap =   wm8997_get_regmap,
 
 	.idle_bias_off = true,
 
diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
index 2d211dbe742255d5b777924f92c20a867e424f35..1288e1f67dcf15f1460b5ee90ff18fb7cc15a0f6 100644
--- a/sound/soc/codecs/wm8998.c
+++ b/sound/soc/codecs/wm8998.c
@@ -1275,9 +1275,11 @@ static int wm8998_codec_probe(struct snd_soc_codec *codec)
 	struct wm8998_priv *priv = snd_soc_codec_get_drvdata(codec);
 	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
 	struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
+	struct arizona *arizona = priv->core.arizona;
 	int ret;
 
-	priv->core.arizona->dapm = dapm;
+	arizona->dapm = dapm;
+	snd_soc_codec_init_regmap(codec, arizona->regmap);
 
 	ret = arizona_init_spk(codec);
 	if (ret < 0)
@@ -1313,17 +1315,9 @@ static unsigned int wm8998_digital_vu[] = {
 	ARIZONA_DAC_DIGITAL_VOLUME_5R,
 };
 
-static struct regmap *wm8998_get_regmap(struct device *dev)
-{
-	struct wm8998_priv *priv = dev_get_drvdata(dev);
-
-	return priv->core.arizona->regmap;
-}
-
 static const struct snd_soc_codec_driver soc_codec_dev_wm8998 = {
 	.probe = wm8998_codec_probe,
 	.remove = wm8998_codec_remove,
-	.get_regmap = wm8998_get_regmap,
 
 	.idle_bias_off = true,
 
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 804c6f2bcf218a61b492e1a67e24d6d3491b8229..03ba218160caf2c02f9df4856f6b2a10e07a4449 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1242,6 +1242,20 @@ static int davinci_mcasp_hw_rule_format(struct snd_pcm_hw_params *params,
 	return snd_mask_refine(fmt, &nfmt);
 }
 
+static int davinci_mcasp_hw_rule_min_periodsize(
+		struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
+{
+	struct snd_interval *period_size = hw_param_interval(params,
+						SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
+	struct snd_interval frames;
+
+	snd_interval_any(&frames);
+	frames.min = 64;
+	frames.integer = 1;
+
+	return snd_interval_refine(period_size, &frames);
+}
+
 static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
 				 struct snd_soc_dai *cpu_dai)
 {
@@ -1333,6 +1347,11 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
 			return ret;
 	}
 
+	snd_pcm_hw_rule_add(substream->runtime, 0,
+			    SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+			    davinci_mcasp_hw_rule_min_periodsize, NULL,
+			    SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
+
 	return 0;
 }
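The rule registered above refines the PERIOD_SIZE interval at open time so that periods are at least 64 frames. Where a fixed bound like this has no dependency on other hw_params, the core also offers a one-line helper; a hedged alternative sketch (not what the patch does, just the shorter form):

#include <linux/kernel.h>
#include <sound/pcm.h>

/* Same lower bound expressed with the generic helper instead of a rule */
static int example_constrain_period_size(struct snd_pcm_runtime *runtime)
{
	return snd_pcm_hw_constraint_minmax(runtime,
					    SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
					    64, UINT_MAX);
}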
 
diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
index 84ef6385736cd5df845aa2d675caaac67ddef1de..191426a6d9adffa4a4eab4d6f681d3eb1dd101ed 100644
--- a/sound/soc/fsl/eukrea-tlv320.c
+++ b/sound/soc/fsl/eukrea-tlv320.c
@@ -29,7 +29,6 @@
 
 #include "../codecs/tlv320aic23.h"
 #include "imx-ssi.h"
-#include "fsl_ssi.h"
 #include "imx-audmux.h"
 
 #define CODEC_CLOCK 12000000
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 1225e0399de8193cc5fce40a61e53bba6fc93e07..989be518c4ed6aad92df786663033516911c97f1 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -442,8 +442,8 @@ static int fsl_asoc_card_late_probe(struct snd_soc_card *card)
 
 	if (fsl_asoc_card_is_ac97(priv)) {
 #if IS_ENABLED(CONFIG_SND_AC97_CODEC)
-		struct snd_soc_codec *codec = rtd->codec;
-		struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
+		struct snd_soc_component *component = rtd->codec_dai->component;
+		struct snd_ac97 *ac97 = snd_soc_component_get_drvdata(component);
 
 		/*
 		 * Use slots 3/4 for S/PDIF so SSI won't try to enable
diff --git a/sound/soc/fsl/fsl_asrc.h b/sound/soc/fsl/fsl_asrc.h
index 52c27a358933b100d85b2392abe2d2fc4501dd3a..2c5856ac5bc31f7760c8bf4bed2db91454875375 100644
--- a/sound/soc/fsl/fsl_asrc.h
+++ b/sound/soc/fsl/fsl_asrc.h
@@ -57,7 +57,7 @@
 #define REG_ASRDOC			0x74
 #define REG_ASRDI(i)			(REG_ASRDIA + (i << 3))
 #define REG_ASRDO(i)			(REG_ASRDOA + (i << 3))
-#define REG_ASRDx(x, i)			(x == IN ? REG_ASRDI(i) : REG_ASRDO(i))
+#define REG_ASRDx(x, i)			((x) == IN ? REG_ASRDI(i) : REG_ASRDO(i))
 
 #define REG_ASRIDRHA			0x80
 #define REG_ASRIDRLA			0x84
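The added parentheses around x in REG_ASRDx() guard against argument expressions built from lower-precedence operators: in C, == binds more tightly than &, so an unparenthesized expansion can end up comparing only part of the argument. A standalone illustration (macro names and values are made up for the demonstration):

#include <stdio.h>

#define IN		0			/* illustrative value only */
#define SEL_GOOD(x)	((x) == IN ? "input" : "output")
#define SEL_BAD(x)	(x == IN ? "input" : "output")

int main(void)
{
	unsigned int dir = 0x2;			/* direction kept in bit 0 */

	/* ((dir & 0x1) == IN) -> "input", as intended */
	printf("parenthesized:   %s\n", SEL_GOOD(dir & 0x1));
	/* expands to (dir & (0x1 == IN)) == (dir & 0) -> always "output" */
	printf("unparenthesized: %s\n", SEL_BAD(dir & 0x1));
	return 0;
}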
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 424bafaf51efe63e108fcd83ce3c6bda43f9e92c..aecd00f7929d9ee67dd25be751fe6b4765792157 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -69,21 +69,35 @@
  * samples will be written to STX properly.
  */
 #ifdef __BIG_ENDIAN
-#define FSLSSI_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE | \
-	 SNDRV_PCM_FMTBIT_S18_3BE | SNDRV_PCM_FMTBIT_S20_3BE | \
-	 SNDRV_PCM_FMTBIT_S24_3BE | SNDRV_PCM_FMTBIT_S24_BE)
+#define FSLSSI_I2S_FORMATS \
+	(SNDRV_PCM_FMTBIT_S8 | \
+	 SNDRV_PCM_FMTBIT_S16_BE | \
+	 SNDRV_PCM_FMTBIT_S18_3BE | \
+	 SNDRV_PCM_FMTBIT_S20_3BE | \
+	 SNDRV_PCM_FMTBIT_S24_3BE | \
+	 SNDRV_PCM_FMTBIT_S24_BE)
 #else
-#define FSLSSI_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE | \
-	 SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S20_3LE | \
-	 SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE)
+#define FSLSSI_I2S_FORMATS \
+	(SNDRV_PCM_FMTBIT_S8 | \
+	 SNDRV_PCM_FMTBIT_S16_LE | \
+	 SNDRV_PCM_FMTBIT_S18_3LE | \
+	 SNDRV_PCM_FMTBIT_S20_3LE | \
+	 SNDRV_PCM_FMTBIT_S24_3LE | \
+	 SNDRV_PCM_FMTBIT_S24_LE)
 #endif
 
-#define FSLSSI_SIER_DBG_RX_FLAGS (CCSR_SSI_SIER_RFF0_EN | \
-		CCSR_SSI_SIER_RLS_EN | CCSR_SSI_SIER_RFS_EN | \
-		CCSR_SSI_SIER_ROE0_EN | CCSR_SSI_SIER_RFRC_EN)
-#define FSLSSI_SIER_DBG_TX_FLAGS (CCSR_SSI_SIER_TFE0_EN | \
-		CCSR_SSI_SIER_TLS_EN | CCSR_SSI_SIER_TFS_EN | \
-		CCSR_SSI_SIER_TUE0_EN | CCSR_SSI_SIER_TFRC_EN)
+#define FSLSSI_SIER_DBG_RX_FLAGS \
+	(SSI_SIER_RFF0_EN | \
+	 SSI_SIER_RLS_EN | \
+	 SSI_SIER_RFS_EN | \
+	 SSI_SIER_ROE0_EN | \
+	 SSI_SIER_RFRC_EN)
+#define FSLSSI_SIER_DBG_TX_FLAGS \
+	(SSI_SIER_TFE0_EN | \
+	 SSI_SIER_TLS_EN | \
+	 SSI_SIER_TFS_EN | \
+	 SSI_SIER_TUE0_EN | \
+	 SSI_SIER_TFRC_EN)
 
 enum fsl_ssi_type {
 	FSL_SSI_MCP8610,
@@ -92,23 +106,18 @@ enum fsl_ssi_type {
 	FSL_SSI_MX51,
 };
 
-struct fsl_ssi_reg_val {
+struct fsl_ssi_regvals {
 	u32 sier;
 	u32 srcr;
 	u32 stcr;
 	u32 scr;
 };
 
-struct fsl_ssi_rxtx_reg_val {
-	struct fsl_ssi_reg_val rx;
-	struct fsl_ssi_reg_val tx;
-};
-
 static bool fsl_ssi_readable_reg(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CCSR_SSI_SACCEN:
-	case CCSR_SSI_SACCDIS:
+	case REG_SSI_SACCEN:
+	case REG_SSI_SACCDIS:
 		return false;
 	default:
 		return true;
@@ -118,18 +127,18 @@ static bool fsl_ssi_readable_reg(struct device *dev, unsigned int reg)
 static bool fsl_ssi_volatile_reg(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CCSR_SSI_STX0:
-	case CCSR_SSI_STX1:
-	case CCSR_SSI_SRX0:
-	case CCSR_SSI_SRX1:
-	case CCSR_SSI_SISR:
-	case CCSR_SSI_SFCSR:
-	case CCSR_SSI_SACNT:
-	case CCSR_SSI_SACADD:
-	case CCSR_SSI_SACDAT:
-	case CCSR_SSI_SATAG:
-	case CCSR_SSI_SACCST:
-	case CCSR_SSI_SOR:
+	case REG_SSI_STX0:
+	case REG_SSI_STX1:
+	case REG_SSI_SRX0:
+	case REG_SSI_SRX1:
+	case REG_SSI_SISR:
+	case REG_SSI_SFCSR:
+	case REG_SSI_SACNT:
+	case REG_SSI_SACADD:
+	case REG_SSI_SACDAT:
+	case REG_SSI_SATAG:
+	case REG_SSI_SACCST:
+	case REG_SSI_SOR:
 		return true;
 	default:
 		return false;
@@ -139,12 +148,12 @@ static bool fsl_ssi_volatile_reg(struct device *dev, unsigned int reg)
 static bool fsl_ssi_precious_reg(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CCSR_SSI_SRX0:
-	case CCSR_SSI_SRX1:
-	case CCSR_SSI_SISR:
-	case CCSR_SSI_SACADD:
-	case CCSR_SSI_SACDAT:
-	case CCSR_SSI_SATAG:
+	case REG_SSI_SRX0:
+	case REG_SSI_SRX1:
+	case REG_SSI_SISR:
+	case REG_SSI_SACADD:
+	case REG_SSI_SACDAT:
+	case REG_SSI_SATAG:
 		return true;
 	default:
 		return false;
@@ -154,9 +163,9 @@ static bool fsl_ssi_precious_reg(struct device *dev, unsigned int reg)
 static bool fsl_ssi_writeable_reg(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CCSR_SSI_SRX0:
-	case CCSR_SSI_SRX1:
-	case CCSR_SSI_SACCST:
+	case REG_SSI_SRX0:
+	case REG_SSI_SRX1:
+	case REG_SSI_SACCST:
 		return false;
 	default:
 		return true;
@@ -164,12 +173,12 @@ static bool fsl_ssi_writeable_reg(struct device *dev, unsigned int reg)
 }
 
 static const struct regmap_config fsl_ssi_regconfig = {
-	.max_register = CCSR_SSI_SACCDIS,
+	.max_register = REG_SSI_SACCDIS,
 	.reg_bits = 32,
 	.val_bits = 32,
 	.reg_stride = 4,
 	.val_format_endian = REGMAP_ENDIAN_NATIVE,
-	.num_reg_defaults_raw = CCSR_SSI_SACCDIS / sizeof(uint32_t) + 1,
+	.num_reg_defaults_raw = REG_SSI_SACCDIS / sizeof(uint32_t) + 1,
 	.readable_reg = fsl_ssi_readable_reg,
 	.volatile_reg = fsl_ssi_volatile_reg,
 	.precious_reg = fsl_ssi_precious_reg,
@@ -185,78 +194,79 @@ struct fsl_ssi_soc_data {
 };
 
 /**
- * fsl_ssi_private: per-SSI private data
+ * fsl_ssi: per-SSI private data
  *
- * @reg: Pointer to the regmap registers
+ * @regs: Pointer to the regmap registers
  * @irq: IRQ of this SSI
  * @cpu_dai_drv: CPU DAI driver for this device
  *
  * @dai_fmt: DAI configuration this device is currently used with
- * @i2s_mode: i2s and network mode configuration of the device. Is used to
- * switch between normal and i2s/network mode
- * mode depending on the number of channels
+ * @i2s_net: I2S and Network mode settings of the SCR register
  * @use_dma: DMA is used or FIQ with stream filter
- * @use_dual_fifo: DMA with support for both FIFOs used
- * @fifo_deph: Depth of the SSI FIFOs
- * @slot_width: width of each DAI slot
- * @slots: number of slots
- * @rxtx_reg_val: Specific register settings for receive/transmit configuration
+ * @use_dual_fifo: DMA with support for dual FIFO mode
+ * @has_ipg_clk_name: If "ipg" is in the clock name list of device tree
+ * @fifo_depth: Depth of the SSI FIFOs
+ * @slot_width: Width of each DAI slot
+ * @slots: Number of slots
+ * @regvals: Specific RX/TX register settings
  *
- * @clk: SSI clock
- * @baudclk: SSI baud clock for master mode
+ * @clk: Clock source to access register
+ * @baudclk: Clock source to generate bit and frame-sync clocks
  * @baudclk_streams: Active streams that are using baudclk
  *
+ * @regcache_sfcsr: Cache sfcsr register value during suspend and resume
+ * @regcache_sacnt: Cache sacnt register value during suspend and resume
+ *
  * @dma_params_tx: DMA transmit parameters
  * @dma_params_rx: DMA receive parameters
  * @ssi_phys: physical address of the SSI registers
  *
  * @fiq_params: FIQ stream filtering parameters
  *
- * @pdev: Pointer to pdev used for deprecated fsl-ssi sound card
+ * @pdev: Pointer to pdev when using fsl-ssi as sound card (ppc only)
+ *        TODO: Should be replaced with simple-sound-card
  *
  * @dbg_stats: Debugging statistics
  *
  * @soc: SoC specific data
+ * @dev: Pointer to &pdev->dev
  *
- * @fifo_watermark: the FIFO watermark setting.  Notifies DMA when
- *             there are @fifo_watermark or fewer words in TX fifo or
- *             @fifo_watermark or more empty words in RX fifo.
- * @dma_maxburst: max number of words to transfer in one go.  So far,
- *             this is always the same as fifo_watermark.
+ * @fifo_watermark: The FIFO watermark setting. Notifies DMA when there are
+ *                  @fifo_watermark or fewer words in TX fifo or
+ *                  @fifo_watermark or more empty words in RX fifo.
+ * @dma_maxburst: Max number of words to transfer in one go. So far,
+ *                this is always the same as fifo_watermark.
+ *
+ * @ac97_reg_lock: Mutex lock to serialize AC97 register access operations
  */
-struct fsl_ssi_private {
+struct fsl_ssi {
 	struct regmap *regs;
 	int irq;
 	struct snd_soc_dai_driver cpu_dai_drv;
 
 	unsigned int dai_fmt;
-	u8 i2s_mode;
+	u8 i2s_net;
 	bool use_dma;
 	bool use_dual_fifo;
 	bool has_ipg_clk_name;
 	unsigned int fifo_depth;
 	unsigned int slot_width;
 	unsigned int slots;
-	struct fsl_ssi_rxtx_reg_val rxtx_reg_val;
+	struct fsl_ssi_regvals regvals[2];
 
 	struct clk *clk;
 	struct clk *baudclk;
 	unsigned int baudclk_streams;
 
-	/* regcache for volatile regs */
 	u32 regcache_sfcsr;
 	u32 regcache_sacnt;
 
-	/* DMA params */
 	struct snd_dmaengine_dai_dma_data dma_params_tx;
 	struct snd_dmaengine_dai_dma_data dma_params_rx;
 	dma_addr_t ssi_phys;
 
-	/* params for non-dma FIQ stream filtered mode */
 	struct imx_pcm_fiq_params fiq_params;
 
-	/* Used when using fsl-ssi as sound-card. This is only used by ppc and
-	 * should be replaced with simple-sound-card. */
 	struct platform_device *pdev;
 
 	struct fsl_ssi_dbg dbg_stats;
@@ -271,27 +281,27 @@ struct fsl_ssi_private {
 };
 
 /*
- * imx51 and later SoCs have a slightly different IP that allows the
- * SSI configuration while the SSI unit is running.
- *
- * More important, it is necessary on those SoCs to configure the
- * sperate TX/RX DMA bits just before starting the stream
- * (fsl_ssi_trigger). The SDMA unit has to be configured before fsl_ssi
- * sends any DMA requests to the SDMA unit, otherwise it is not defined
- * how the SDMA unit handles the DMA request.
+ * SoC specific data
  *
- * SDMA units are present on devices starting at imx35 but the imx35
- * reference manual states that the DMA bits should not be changed
- * while the SSI unit is running (SSIEN). So we support the necessary
- * online configuration of fsl-ssi starting at imx51.
+ * Notes:
+ * 1) SSI in earlier SoCs has critical bits in control registers that
+ *    cannot be changed after SSI starts running -- a software reset
+ *    (set SSIEN to 0) is required to change their values, so an
+ *    offline_config flag is set for these SoCs.
+ * 2) SDMA is available since imx35. However, imx35 does not support
+ *    DMA bits changing when SSI is running, so set offline_config.
+ * 3) imx51 and later versions support register configurations when
+ *    SSI is running (SSIEN); For these versions, DMA needs to be
+ *    configured before SSI sends DMA request to avoid an undefined
+ *    DMA request on the SDMA side.
  */
 
 static struct fsl_ssi_soc_data fsl_ssi_mpc8610 = {
 	.imx = false,
 	.offline_config = true,
-	.sisr_write_mask = CCSR_SSI_SISR_RFRC | CCSR_SSI_SISR_TFRC |
-			CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 |
-			CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1,
+	.sisr_write_mask = SSI_SISR_RFRC | SSI_SISR_TFRC |
+			   SSI_SISR_ROE0 | SSI_SISR_ROE1 |
+			   SSI_SISR_TUE0 | SSI_SISR_TUE1,
 };
 
 static struct fsl_ssi_soc_data fsl_ssi_imx21 = {
@@ -304,16 +314,16 @@ static struct fsl_ssi_soc_data fsl_ssi_imx21 = {
 static struct fsl_ssi_soc_data fsl_ssi_imx35 = {
 	.imx = true,
 	.offline_config = true,
-	.sisr_write_mask = CCSR_SSI_SISR_RFRC | CCSR_SSI_SISR_TFRC |
-			CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 |
-			CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1,
+	.sisr_write_mask = SSI_SISR_RFRC | SSI_SISR_TFRC |
+			   SSI_SISR_ROE0 | SSI_SISR_ROE1 |
+			   SSI_SISR_TUE0 | SSI_SISR_TUE1,
 };
 
 static struct fsl_ssi_soc_data fsl_ssi_imx51 = {
 	.imx = true,
 	.offline_config = false,
-	.sisr_write_mask = CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 |
-		CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1,
+	.sisr_write_mask = SSI_SISR_ROE0 | SSI_SISR_ROE1 |
+			   SSI_SISR_TUE0 | SSI_SISR_TUE1,
 };
 
 static const struct of_device_id fsl_ssi_ids[] = {
@@ -325,108 +335,86 @@ static const struct of_device_id fsl_ssi_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, fsl_ssi_ids);
 
-static bool fsl_ssi_is_ac97(struct fsl_ssi_private *ssi_private)
+static bool fsl_ssi_is_ac97(struct fsl_ssi *ssi)
 {
-	return (ssi_private->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
+	return (ssi->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
 		SND_SOC_DAIFMT_AC97;
 }
 
-static bool fsl_ssi_is_i2s_master(struct fsl_ssi_private *ssi_private)
+static bool fsl_ssi_is_i2s_master(struct fsl_ssi *ssi)
 {
-	return (ssi_private->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
+	return (ssi->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
 		SND_SOC_DAIFMT_CBS_CFS;
 }
 
-static bool fsl_ssi_is_i2s_cbm_cfs(struct fsl_ssi_private *ssi_private)
+static bool fsl_ssi_is_i2s_cbm_cfs(struct fsl_ssi *ssi)
 {
-	return (ssi_private->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
+	return (ssi->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
 		SND_SOC_DAIFMT_CBM_CFS;
 }
+
 /**
- * fsl_ssi_isr: SSI interrupt handler
- *
- * Although it's possible to use the interrupt handler to send and receive
- * data to/from the SSI, we use the DMA instead.  Programming is more
- * complicated, but the performance is much better.
- *
- * This interrupt handler is used only to gather statistics.
- *
- * @irq: IRQ of the SSI device
- * @dev_id: pointer to the ssi_private structure for this SSI device
+ * Interrupt handler to gather statistics
  */
 static irqreturn_t fsl_ssi_isr(int irq, void *dev_id)
 {
-	struct fsl_ssi_private *ssi_private = dev_id;
-	struct regmap *regs = ssi_private->regs;
+	struct fsl_ssi *ssi = dev_id;
+	struct regmap *regs = ssi->regs;
 	__be32 sisr;
 	__be32 sisr2;
 
-	/* We got an interrupt, so read the status register to see what we
-	   were interrupted for.  We mask it with the Interrupt Enable register
-	   so that we only check for events that we're interested in.
-	 */
-	regmap_read(regs, CCSR_SSI_SISR, &sisr);
+	regmap_read(regs, REG_SSI_SISR, &sisr);
 
-	sisr2 = sisr & ssi_private->soc->sisr_write_mask;
+	sisr2 = sisr & ssi->soc->sisr_write_mask;
 	/* Clear the bits that we set */
 	if (sisr2)
-		regmap_write(regs, CCSR_SSI_SISR, sisr2);
+		regmap_write(regs, REG_SSI_SISR, sisr2);
 
-	fsl_ssi_dbg_isr(&ssi_private->dbg_stats, sisr);
+	fsl_ssi_dbg_isr(&ssi->dbg_stats, sisr);
 
 	return IRQ_HANDLED;
 }
 
-/*
- * Enable/Disable all rx/tx config flags at once.
+/**
+ * Enable or disable all rx/tx config flags at once
  */
-static void fsl_ssi_rxtx_config(struct fsl_ssi_private *ssi_private,
-		bool enable)
+static void fsl_ssi_rxtx_config(struct fsl_ssi *ssi, bool enable)
 {
-	struct regmap *regs = ssi_private->regs;
-	struct fsl_ssi_rxtx_reg_val *vals = &ssi_private->rxtx_reg_val;
+	struct regmap *regs = ssi->regs;
+	struct fsl_ssi_regvals *vals = ssi->regvals;
 
 	if (enable) {
-		regmap_update_bits(regs, CCSR_SSI_SIER,
-				vals->rx.sier | vals->tx.sier,
-				vals->rx.sier | vals->tx.sier);
-		regmap_update_bits(regs, CCSR_SSI_SRCR,
-				vals->rx.srcr | vals->tx.srcr,
-				vals->rx.srcr | vals->tx.srcr);
-		regmap_update_bits(regs, CCSR_SSI_STCR,
-				vals->rx.stcr | vals->tx.stcr,
-				vals->rx.stcr | vals->tx.stcr);
+		regmap_update_bits(regs, REG_SSI_SIER,
+				   vals[RX].sier | vals[TX].sier,
+				   vals[RX].sier | vals[TX].sier);
+		regmap_update_bits(regs, REG_SSI_SRCR,
+				   vals[RX].srcr | vals[TX].srcr,
+				   vals[RX].srcr | vals[TX].srcr);
+		regmap_update_bits(regs, REG_SSI_STCR,
+				   vals[RX].stcr | vals[TX].stcr,
+				   vals[RX].stcr | vals[TX].stcr);
 	} else {
-		regmap_update_bits(regs, CCSR_SSI_SRCR,
-				vals->rx.srcr | vals->tx.srcr, 0);
-		regmap_update_bits(regs, CCSR_SSI_STCR,
-				vals->rx.stcr | vals->tx.stcr, 0);
-		regmap_update_bits(regs, CCSR_SSI_SIER,
-				vals->rx.sier | vals->tx.sier, 0);
+		regmap_update_bits(regs, REG_SSI_SRCR,
+				   vals[RX].srcr | vals[TX].srcr, 0);
+		regmap_update_bits(regs, REG_SSI_STCR,
+				   vals[RX].stcr | vals[TX].stcr, 0);
+		regmap_update_bits(regs, REG_SSI_SIER,
+				   vals[RX].sier | vals[TX].sier, 0);
 	}
 }
 
-/*
- * Clear RX or TX FIFO to remove samples from the previous
- * stream session which may be still present in the FIFO and
- * may introduce bad samples and/or channel slipping.
- *
- * Note: The SOR is not documented in recent IMX datasheet, but
- * is described in IMX51 reference manual at section 56.3.3.15.
+/**
+ * Clear remaining data in the FIFO to avoid dirty data or channel slipping
  */
-static void fsl_ssi_fifo_clear(struct fsl_ssi_private *ssi_private,
-		bool is_rx)
+static void fsl_ssi_fifo_clear(struct fsl_ssi *ssi, bool is_rx)
 {
-	if (is_rx) {
-		regmap_update_bits(ssi_private->regs, CCSR_SSI_SOR,
-			CCSR_SSI_SOR_RX_CLR, CCSR_SSI_SOR_RX_CLR);
-	} else {
-		regmap_update_bits(ssi_private->regs, CCSR_SSI_SOR,
-			CCSR_SSI_SOR_TX_CLR, CCSR_SSI_SOR_TX_CLR);
-	}
+	bool tx = !is_rx;
+
+	regmap_update_bits(ssi->regs, REG_SSI_SOR,
+			   SSI_SOR_xX_CLR(tx), SSI_SOR_xX_CLR(tx));
 }
 
-/*
+/**
  * Calculate the bits that have to be disabled for the current stream that is
  * getting disabled. This keeps the bits enabled that are necessary for the
  * second stream to work if 'stream_active' is true.
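The calculation described in the comment above keeps every bit that the still-active stream also needs and clears only the bits unique to the stream being shut down. A small standalone sketch with concrete numbers (the values are illustrative; the expression mirrors fsl_ssi_disable_val() from the hunk that follows):

#include <stdio.h>

typedef unsigned int u32;

/* Same expression as fsl_ssi_disable_val() in the driver code below */
#define disable_val(vals_disable, vals_stream, stream_active) \
	((vals_disable) & \
	 ((vals_disable) ^ ((vals_stream) * (u32)!!(stream_active))))

int main(void)
{
	u32 tx_bits = 0x0000018c;	/* bits the TX stream needs (example) */
	u32 rx_bits = 0x00000184;	/* bits the RX stream needs (example) */

	/* RX still active: only the TX-only bit 0x8 may be cleared */
	printf("0x%08x\n", disable_val(tx_bits, rx_bits, 1));
	/* RX idle: every TX bit may be cleared -> 0x0000018c */
	printf("0x%08x\n", disable_val(tx_bits, rx_bits, 0));
	return 0;
}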
@@ -446,261 +434,239 @@ static void fsl_ssi_fifo_clear(struct fsl_ssi_private *ssi_private,
 	((vals_disable) & \
 	 ((vals_disable) ^ ((vals_stream) * (u32)!!(stream_active))))
 
-/*
- * Enable/Disable a ssi configuration. You have to pass either
- * ssi_private->rxtx_reg_val.rx or tx as vals parameter.
+/**
+ * Enable or disable SSI configuration.
  */
-static void fsl_ssi_config(struct fsl_ssi_private *ssi_private, bool enable,
-		struct fsl_ssi_reg_val *vals)
+static void fsl_ssi_config(struct fsl_ssi *ssi, bool enable,
+			   struct fsl_ssi_regvals *vals)
 {
-	struct regmap *regs = ssi_private->regs;
-	struct fsl_ssi_reg_val *avals;
+	struct regmap *regs = ssi->regs;
+	struct fsl_ssi_regvals *avals;
 	int nr_active_streams;
-	u32 scr_val;
+	u32 scr;
 	int keep_active;
 
-	regmap_read(regs, CCSR_SSI_SCR, &scr_val);
+	regmap_read(regs, REG_SSI_SCR, &scr);
 
-	nr_active_streams = !!(scr_val & CCSR_SSI_SCR_TE) +
-				!!(scr_val & CCSR_SSI_SCR_RE);
+	nr_active_streams = !!(scr & SSI_SCR_TE) + !!(scr & SSI_SCR_RE);
 
 	if (nr_active_streams - 1 > 0)
 		keep_active = 1;
 	else
 		keep_active = 0;
 
-	/* Find the other direction values rx or tx which we do not want to
-	 * modify */
-	if (&ssi_private->rxtx_reg_val.rx == vals)
-		avals = &ssi_private->rxtx_reg_val.tx;
+	/* Get the opposite direction to keep its values untouched */
+	if (&ssi->regvals[RX] == vals)
+		avals = &ssi->regvals[TX];
 	else
-		avals = &ssi_private->rxtx_reg_val.rx;
+		avals = &ssi->regvals[RX];
 
-	/* If vals should be disabled, start with disabling the unit */
 	if (!enable) {
+		/*
+		 * To keep the other stream safe, exclude shared bits between
+		 * both streams, and get safe bits to disable current stream
+		 */
 		u32 scr = fsl_ssi_disable_val(vals->scr, avals->scr,
-				keep_active);
-		regmap_update_bits(regs, CCSR_SSI_SCR, scr, 0);
+					      keep_active);
+		/* Safely disable SCR register for the stream */
+		regmap_update_bits(regs, REG_SSI_SCR, scr, 0);
 	}
 
 	/*
-	 * We are running on a SoC which does not support online SSI
-	 * reconfiguration, so we have to enable all necessary flags at once
-	 * even if we do not use them later (capture and playback configuration)
+	 * For cases where online configuration is not supported,
+	 * 1) Enable all necessary bits of both streams when 1st stream starts
+	 *    even if the opposite stream will not start
+	 * 2) Disable all remaining bits of both streams when last stream ends
 	 */
-	if (ssi_private->soc->offline_config) {
-		if ((enable && !nr_active_streams) ||
-				(!enable && !keep_active))
-			fsl_ssi_rxtx_config(ssi_private, enable);
+	if (ssi->soc->offline_config) {
+		if ((enable && !nr_active_streams) || (!enable && !keep_active))
+			fsl_ssi_rxtx_config(ssi, enable);
 
 		goto config_done;
 	}
 
-	/*
-	 * Configure single direction units while the SSI unit is running
-	 * (online configuration)
-	 */
+	/* Online configure single direction while SSI is running */
 	if (enable) {
-		fsl_ssi_fifo_clear(ssi_private, vals->scr & CCSR_SSI_SCR_RE);
+		fsl_ssi_fifo_clear(ssi, vals->scr & SSI_SCR_RE);
 
-		regmap_update_bits(regs, CCSR_SSI_SRCR, vals->srcr, vals->srcr);
-		regmap_update_bits(regs, CCSR_SSI_STCR, vals->stcr, vals->stcr);
-		regmap_update_bits(regs, CCSR_SSI_SIER, vals->sier, vals->sier);
+		regmap_update_bits(regs, REG_SSI_SRCR, vals->srcr, vals->srcr);
+		regmap_update_bits(regs, REG_SSI_STCR, vals->stcr, vals->stcr);
+		regmap_update_bits(regs, REG_SSI_SIER, vals->sier, vals->sier);
 	} else {
 		u32 sier;
 		u32 srcr;
 		u32 stcr;
 
 		/*
-		 * Disabling the necessary flags for one of rx/tx while the
-		 * other stream is active is a little bit more difficult. We
-		 * have to disable only those flags that differ between both
-		 * streams (rx XOR tx) and that are set in the stream that is
-		 * disabled now. Otherwise we could alter flags of the other
-		 * stream
+		 * To keep the other stream safe, exclude shared bits between
+		 * both streams, and get safe bits to disable current stream
 		 */
-
-		/* These assignments are simply vals without bits set in avals*/
 		sier = fsl_ssi_disable_val(vals->sier, avals->sier,
-				keep_active);
+					   keep_active);
 		srcr = fsl_ssi_disable_val(vals->srcr, avals->srcr,
-				keep_active);
+					   keep_active);
 		stcr = fsl_ssi_disable_val(vals->stcr, avals->stcr,
-				keep_active);
+					   keep_active);
 
-		regmap_update_bits(regs, CCSR_SSI_SRCR, srcr, 0);
-		regmap_update_bits(regs, CCSR_SSI_STCR, stcr, 0);
-		regmap_update_bits(regs, CCSR_SSI_SIER, sier, 0);
+		/* Safely disable other control registers for the stream */
+		regmap_update_bits(regs, REG_SSI_SRCR, srcr, 0);
+		regmap_update_bits(regs, REG_SSI_STCR, stcr, 0);
+		regmap_update_bits(regs, REG_SSI_SIER, sier, 0);
 	}
 
 config_done:
 	/* Enabling of subunits is done after configuration */
 	if (enable) {
-		if (ssi_private->use_dma && (vals->scr & CCSR_SSI_SCR_TE)) {
-			/*
-			 * Be sure the Tx FIFO is filled when TE is set.
-			 * Otherwise, there are some chances to start the
-			 * playback with some void samples inserted first,
-			 * generating a channel slip.
-			 *
-			 * First, SSIEN must be set, to let the FIFO be filled.
-			 *
-			 * Notes:
-			 * - Limit this fix to the DMA case until FIQ cases can
-			 *   be tested.
-			 * - Limit the length of the busy loop to not lock the
-			 *   system too long, even if 1-2 loops are sufficient
-			 *   in general.
-			 */
+		/*
+		 * Start DMA before setting TE to avoid FIFO underrun
+		 * which may cause a channel slip or a channel swap
+		 *
+		 * TODO: FIQ cases might also need this upon testing
+		 */
+		if (ssi->use_dma && (vals->scr & SSI_SCR_TE)) {
 			int i;
 			int max_loop = 100;
-			regmap_update_bits(regs, CCSR_SSI_SCR,
-					CCSR_SSI_SCR_SSIEN, CCSR_SSI_SCR_SSIEN);
+
+			/* Enable SSI first to send TX DMA request */
+			regmap_update_bits(regs, REG_SSI_SCR,
+					   SSI_SCR_SSIEN, SSI_SCR_SSIEN);
+
+			/* Busy wait until TX FIFO not empty -- DMA working */
 			for (i = 0; i < max_loop; i++) {
 				u32 sfcsr;
-				regmap_read(regs, CCSR_SSI_SFCSR, &sfcsr);
-				if (CCSR_SSI_SFCSR_TFCNT0(sfcsr))
+				regmap_read(regs, REG_SSI_SFCSR, &sfcsr);
+				if (SSI_SFCSR_TFCNT0(sfcsr))
 					break;
 			}
 			if (i == max_loop) {
-				dev_err(ssi_private->dev,
+				dev_err(ssi->dev,
 					"Timeout waiting TX FIFO filling\n");
 			}
 		}
-		regmap_update_bits(regs, CCSR_SSI_SCR, vals->scr, vals->scr);
+		/* Enable all remaining bits */
+		regmap_update_bits(regs, REG_SSI_SCR, vals->scr, vals->scr);
 	}
 }
 
+static void fsl_ssi_rx_config(struct fsl_ssi *ssi, bool enable)
+{
+	fsl_ssi_config(ssi, enable, &ssi->regvals[RX]);
+}
 
-static void fsl_ssi_rx_config(struct fsl_ssi_private *ssi_private, bool enable)
+static void fsl_ssi_tx_ac97_saccst_setup(struct fsl_ssi *ssi)
 {
-	fsl_ssi_config(ssi_private, enable, &ssi_private->rxtx_reg_val.rx);
+	struct regmap *regs = ssi->regs;
+
+	/* no SACC{ST,EN,DIS} regs on imx21-class SSI */
+	if (!ssi->soc->imx21regs) {
+		/* Disable all channel slots */
+		regmap_write(regs, REG_SSI_SACCDIS, 0xff);
+		/* Enable slots 3 & 4 -- PCM Playback Left & Right channels */
+		regmap_write(regs, REG_SSI_SACCEN, 0x300);
+	}
 }
 
-static void fsl_ssi_tx_config(struct fsl_ssi_private *ssi_private, bool enable)
+static void fsl_ssi_tx_config(struct fsl_ssi *ssi, bool enable)
 {
-	fsl_ssi_config(ssi_private, enable, &ssi_private->rxtx_reg_val.tx);
+	/*
+	 * SACCST might be modified via AC Link by a CODEC if it sends
+	 * extra bits in its SLOTREQ requests, which would accidentally
+	 * send valid data to slots other than the normal playback slots.
+	 *
+	 * To be safe, configure SACCST right before TX starts.
+	 */
+	if (enable && fsl_ssi_is_ac97(ssi))
+		fsl_ssi_tx_ac97_saccst_setup(ssi);
+
+	fsl_ssi_config(ssi, enable, &ssi->regvals[TX]);
 }
 
-/*
- * Setup rx/tx register values used to enable/disable the streams. These will
- * be used later in fsl_ssi_config to setup the streams without the need to
- * check for all different SSI modes.
+/**
+ * Cache critical bits of SIER, SRCR, STCR and SCR to later set them safely
  */
-static void fsl_ssi_setup_reg_vals(struct fsl_ssi_private *ssi_private)
+static void fsl_ssi_setup_regvals(struct fsl_ssi *ssi)
 {
-	struct fsl_ssi_rxtx_reg_val *reg = &ssi_private->rxtx_reg_val;
-
-	reg->rx.sier = CCSR_SSI_SIER_RFF0_EN;
-	reg->rx.srcr = CCSR_SSI_SRCR_RFEN0;
-	reg->rx.scr = 0;
-	reg->tx.sier = CCSR_SSI_SIER_TFE0_EN;
-	reg->tx.stcr = CCSR_SSI_STCR_TFEN0;
-	reg->tx.scr = 0;
-
-	if (!fsl_ssi_is_ac97(ssi_private)) {
-		reg->rx.scr = CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_RE;
-		reg->rx.sier |= CCSR_SSI_SIER_RFF0_EN;
-		reg->tx.scr = CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE;
-		reg->tx.sier |= CCSR_SSI_SIER_TFE0_EN;
+	struct fsl_ssi_regvals *vals = ssi->regvals;
+
+	vals[RX].sier = SSI_SIER_RFF0_EN;
+	vals[RX].srcr = SSI_SRCR_RFEN0;
+	vals[RX].scr = 0;
+	vals[TX].sier = SSI_SIER_TFE0_EN;
+	vals[TX].stcr = SSI_STCR_TFEN0;
+	vals[TX].scr = 0;
+
+	/* AC97 has already enabled SSIEN, RE and TE, so ignore them */
+	if (!fsl_ssi_is_ac97(ssi)) {
+		vals[RX].scr = SSI_SCR_SSIEN | SSI_SCR_RE;
+		vals[TX].scr = SSI_SCR_SSIEN | SSI_SCR_TE;
 	}
 
-	if (ssi_private->use_dma) {
-		reg->rx.sier |= CCSR_SSI_SIER_RDMAE;
-		reg->tx.sier |= CCSR_SSI_SIER_TDMAE;
+	if (ssi->use_dma) {
+		vals[RX].sier |= SSI_SIER_RDMAE;
+		vals[TX].sier |= SSI_SIER_TDMAE;
 	} else {
-		reg->rx.sier |= CCSR_SSI_SIER_RIE;
-		reg->tx.sier |= CCSR_SSI_SIER_TIE;
+		vals[RX].sier |= SSI_SIER_RIE;
+		vals[TX].sier |= SSI_SIER_TIE;
 	}
 
-	reg->rx.sier |= FSLSSI_SIER_DBG_RX_FLAGS;
-	reg->tx.sier |= FSLSSI_SIER_DBG_TX_FLAGS;
+	vals[RX].sier |= FSLSSI_SIER_DBG_RX_FLAGS;
+	vals[TX].sier |= FSLSSI_SIER_DBG_TX_FLAGS;
 }
 
-static void fsl_ssi_setup_ac97(struct fsl_ssi_private *ssi_private)
+static void fsl_ssi_setup_ac97(struct fsl_ssi *ssi)
 {
-	struct regmap *regs = ssi_private->regs;
-
-	/*
-	 * Setup the clock control register
-	 */
-	regmap_write(regs, CCSR_SSI_STCCR,
-			CCSR_SSI_SxCCR_WL(17) | CCSR_SSI_SxCCR_DC(13));
-	regmap_write(regs, CCSR_SSI_SRCCR,
-			CCSR_SSI_SxCCR_WL(17) | CCSR_SSI_SxCCR_DC(13));
+	struct regmap *regs = ssi->regs;
 
-	/*
-	 * Enable AC97 mode and startup the SSI
-	 */
-	regmap_write(regs, CCSR_SSI_SACNT,
-			CCSR_SSI_SACNT_AC97EN | CCSR_SSI_SACNT_FV);
+	/* Setup the clock control register */
+	regmap_write(regs, REG_SSI_STCCR, SSI_SxCCR_WL(17) | SSI_SxCCR_DC(13));
+	regmap_write(regs, REG_SSI_SRCCR, SSI_SxCCR_WL(17) | SSI_SxCCR_DC(13));
 
-	/* no SACC{ST,EN,DIS} regs on imx21-class SSI */
-	if (!ssi_private->soc->imx21regs) {
-		regmap_write(regs, CCSR_SSI_SACCDIS, 0xff);
-		regmap_write(regs, CCSR_SSI_SACCEN, 0x300);
-	}
+	/* Enable AC97 mode and startup the SSI */
+	regmap_write(regs, REG_SSI_SACNT, SSI_SACNT_AC97EN | SSI_SACNT_FV);
 
-	/*
-	 * Enable SSI, Transmit and Receive. AC97 has to communicate with the
-	 * codec before a stream is started.
-	 */
-	regmap_update_bits(regs, CCSR_SSI_SCR,
-			CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE | CCSR_SSI_SCR_RE,
-			CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE | CCSR_SSI_SCR_RE);
+	/* AC97 has to communicate with codec before starting a stream */
+	regmap_update_bits(regs, REG_SSI_SCR,
+			   SSI_SCR_SSIEN | SSI_SCR_TE | SSI_SCR_RE,
+			   SSI_SCR_SSIEN | SSI_SCR_TE | SSI_SCR_RE);
 
-	regmap_write(regs, CCSR_SSI_SOR, CCSR_SSI_SOR_WAIT(3));
+	regmap_write(regs, REG_SSI_SOR, SSI_SOR_WAIT(3));
 }
 
-/**
- * fsl_ssi_startup: create a new substream
- *
- * This is the first function called when a stream is opened.
- *
- * If this is the first stream open, then grab the IRQ and program most of
- * the SSI registers.
- */
 static int fsl_ssi_startup(struct snd_pcm_substream *substream,
 			   struct snd_soc_dai *dai)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct fsl_ssi_private *ssi_private =
-		snd_soc_dai_get_drvdata(rtd->cpu_dai);
+	struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
 	int ret;
 
-	ret = clk_prepare_enable(ssi_private->clk);
+	ret = clk_prepare_enable(ssi->clk);
 	if (ret)
 		return ret;
 
-	/* When using dual fifo mode, it is safer to ensure an even period
+	/*
+	 * When using dual fifo mode, it is safer to ensure an even period
 	 * size. If appearing to an odd number while DMA always starts its
 	 * task from fifo0, fifo1 would be neglected at the end of each
 	 * period. But SSI would still access fifo1 with an invalid data.
 	 */
-	if (ssi_private->use_dual_fifo)
+	if (ssi->use_dual_fifo)
 		snd_pcm_hw_constraint_step(substream->runtime, 0,
-				SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2);
+					   SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2);
 
 	return 0;
 }
 
-/**
- * fsl_ssi_shutdown: shutdown the SSI
- *
- */
 static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
-				struct snd_soc_dai *dai)
+			     struct snd_soc_dai *dai)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct fsl_ssi_private *ssi_private =
-		snd_soc_dai_get_drvdata(rtd->cpu_dai);
-
-	clk_disable_unprepare(ssi_private->clk);
+	struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
 
+	clk_disable_unprepare(ssi->clk);
 }
 
 /**
- * fsl_ssi_set_bclk - configure Digital Audio Interface bit clock
+ * Configure Digital Audio Interface bit clock
  *
  * Note: This function can be only called when using SSI as DAI master
  *
@@ -709,12 +675,13 @@ static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
  *       (In 2-channel I2S Master mode, slot_width is fixed 32)
  */
 static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
-		struct snd_soc_dai *cpu_dai,
-		struct snd_pcm_hw_params *hw_params)
+			    struct snd_soc_dai *dai,
+			    struct snd_pcm_hw_params *hw_params)
 {
-	struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
-	struct regmap *regs = ssi_private->regs;
-	int synchronous = ssi_private->cpu_dai_drv.symmetric_rates, ret;
+	bool tx2, tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+	struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
+	struct regmap *regs = ssi->regs;
+	int synchronous = ssi->cpu_dai_drv.symmetric_rates, ret;
 	u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i;
 	unsigned long clkrate, baudrate, tmprate;
 	unsigned int slots = params_channels(hw_params);
@@ -724,29 +691,29 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
 	bool baudclk_is_used;
 
 	/* Override slots and slot_width if being specifically set... */
-	if (ssi_private->slots)
-		slots = ssi_private->slots;
+	if (ssi->slots)
+		slots = ssi->slots;
 	/* ...but keep 32 bits if slots is 2 -- I2S Master mode */
-	if (ssi_private->slot_width && slots != 2)
-		slot_width = ssi_private->slot_width;
+	if (ssi->slot_width && slots != 2)
+		slot_width = ssi->slot_width;
 
 	/* Generate bit clock based on the slot number and slot width */
 	freq = slots * slot_width * params_rate(hw_params);
 
 	/* Don't apply it to any non-baudclk circumstance */
-	if (IS_ERR(ssi_private->baudclk))
+	if (IS_ERR(ssi->baudclk))
 		return -EINVAL;
 
 	/*
 	 * Hardware limitation: The bclk rate must be
 	 * never greater than 1/5 IPG clock rate
 	 */
-	if (freq * 5 > clk_get_rate(ssi_private->clk)) {
-		dev_err(cpu_dai->dev, "bitclk > ipgclk/5\n");
+	if (freq * 5 > clk_get_rate(ssi->clk)) {
+		dev_err(dai->dev, "bitclk > ipgclk / 5\n");
 		return -EINVAL;
 	}
 
-	baudclk_is_used = ssi_private->baudclk_streams & ~(BIT(substream->stream));
+	baudclk_is_used = ssi->baudclk_streams & ~(BIT(substream->stream));
 
 	/* It should be already enough to divide clock by setting pm alone */
 	psr = 0;
@@ -758,9 +725,9 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
 		tmprate = freq * factor * (i + 1);
 
 		if (baudclk_is_used)
-			clkrate = clk_get_rate(ssi_private->baudclk);
+			clkrate = clk_get_rate(ssi->baudclk);
 		else
-			clkrate = clk_round_rate(ssi_private->baudclk, tmprate);
+			clkrate = clk_round_rate(ssi->baudclk, tmprate);
 
 		clkrate /= factor;
 		afreq = clkrate / (i + 1);
@@ -791,24 +758,22 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
 
 	/* No proper pm found if it is still remaining the initial value */
 	if (pm == 999) {
-		dev_err(cpu_dai->dev, "failed to handle the required sysclk\n");
+		dev_err(dai->dev, "failed to handle the required sysclk\n");
 		return -EINVAL;
 	}
 
-	stccr = CCSR_SSI_SxCCR_PM(pm + 1) | (div2 ? CCSR_SSI_SxCCR_DIV2 : 0) |
-		(psr ? CCSR_SSI_SxCCR_PSR : 0);
-	mask = CCSR_SSI_SxCCR_PM_MASK | CCSR_SSI_SxCCR_DIV2 |
-		CCSR_SSI_SxCCR_PSR;
+	stccr = SSI_SxCCR_PM(pm + 1) | (div2 ? SSI_SxCCR_DIV2 : 0) |
+		(psr ? SSI_SxCCR_PSR : 0);
+	mask = SSI_SxCCR_PM_MASK | SSI_SxCCR_DIV2 | SSI_SxCCR_PSR;
 
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK || synchronous)
-		regmap_update_bits(regs, CCSR_SSI_STCCR, mask, stccr);
-	else
-		regmap_update_bits(regs, CCSR_SSI_SRCCR, mask, stccr);
+	/* STCCR is used for RX in synchronous mode */
+	tx2 = tx || synchronous;
+	regmap_update_bits(regs, REG_SSI_SxCCR(tx2), mask, stccr);
 
 	if (!baudclk_is_used) {
-		ret = clk_set_rate(ssi_private->baudclk, baudrate);
+		ret = clk_set_rate(ssi->baudclk, baudrate);
 		if (ret) {
-			dev_err(cpu_dai->dev, "failed to set baudclk rate\n");
+			dev_err(dai->dev, "failed to set baudclk rate\n");
 			return -EINVAL;
 		}
 	}
@@ -817,185 +782,165 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
 }
 
 /**
- * fsl_ssi_hw_params - program the sample size
- *
- * Most of the SSI registers have been programmed in the startup function,
- * but the word length must be programmed here.  Unfortunately, programming
- * the SxCCR.WL bits requires the SSI to be temporarily disabled.  This can
- * cause a problem with supporting simultaneous playback and capture.  If
- * the SSI is already playing a stream, then that stream may be temporarily
- * stopped when you start capture.
+ * Configure SSI based on PCM hardware parameters
  *
- * Note: The SxCCR.DC and SxCCR.PM bits are only used if the SSI is the
- * clock master.
+ * Notes:
+ * 1) SxCCR.WL bits are critical bits that require SSI to be temporarily
+ *    disabled on offline_config SoCs. Even for online configurable SoCs
+ *    running in synchronous mode (both TX and RX use STCCR), it is not
+ *    safe to re-configure them while both streams are running.
+ * 2) SxCCR.PM, SxCCR.DIV2 and SxCCR.PSR bits will be configured in the
+ *    fsl_ssi_set_bclk() if SSI is the DAI clock master.
  */
 static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
-	struct snd_pcm_hw_params *hw_params, struct snd_soc_dai *cpu_dai)
+			     struct snd_pcm_hw_params *hw_params,
+			     struct snd_soc_dai *dai)
 {
-	struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
-	struct regmap *regs = ssi_private->regs;
+	bool tx2, tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+	struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
+	struct regmap *regs = ssi->regs;
 	unsigned int channels = params_channels(hw_params);
 	unsigned int sample_size = params_width(hw_params);
-	u32 wl = CCSR_SSI_SxCCR_WL(sample_size);
+	u32 wl = SSI_SxCCR_WL(sample_size);
 	int ret;
-	u32 scr_val;
+	u32 scr;
 	int enabled;
 
-	regmap_read(regs, CCSR_SSI_SCR, &scr_val);
-	enabled = scr_val & CCSR_SSI_SCR_SSIEN;
+	regmap_read(regs, REG_SSI_SCR, &scr);
+	enabled = scr & SSI_SCR_SSIEN;
 
 	/*
-	 * If we're in synchronous mode, and the SSI is already enabled,
-	 * then STCCR is already set properly.
+	 * SSI is properly configured if it is enabled and running in
+	 * the synchronous mode; Note that AC97 mode is an exception
+	 * that should set separate configurations for STCCR and SRCCR
+	 * despite running in the synchronous mode.
 	 */
-	if (enabled && ssi_private->cpu_dai_drv.symmetric_rates)
+	if (enabled && ssi->cpu_dai_drv.symmetric_rates)
 		return 0;
 
-	if (fsl_ssi_is_i2s_master(ssi_private)) {
-		ret = fsl_ssi_set_bclk(substream, cpu_dai, hw_params);
+	if (fsl_ssi_is_i2s_master(ssi)) {
+		ret = fsl_ssi_set_bclk(substream, dai, hw_params);
 		if (ret)
 			return ret;
 
 		/* Do not enable the clock if it is already enabled */
-		if (!(ssi_private->baudclk_streams & BIT(substream->stream))) {
-			ret = clk_prepare_enable(ssi_private->baudclk);
+		if (!(ssi->baudclk_streams & BIT(substream->stream))) {
+			ret = clk_prepare_enable(ssi->baudclk);
 			if (ret)
 				return ret;
 
-			ssi_private->baudclk_streams |= BIT(substream->stream);
+			ssi->baudclk_streams |= BIT(substream->stream);
 		}
 	}
 
-	if (!fsl_ssi_is_ac97(ssi_private)) {
-		u8 i2smode;
-		/*
-		 * Switch to normal net mode in order to have a frame sync
-		 * signal every 32 bits instead of 16 bits
-		 */
-		if (fsl_ssi_is_i2s_cbm_cfs(ssi_private) && sample_size == 16)
-			i2smode = CCSR_SSI_SCR_I2S_MODE_NORMAL |
-				CCSR_SSI_SCR_NET;
+	if (!fsl_ssi_is_ac97(ssi)) {
+		u8 i2s_net;
+		/* Normal + Network mode to send 16-bit data in 32-bit frames */
+		if (fsl_ssi_is_i2s_cbm_cfs(ssi) && sample_size == 16)
+			i2s_net = SSI_SCR_I2S_MODE_NORMAL | SSI_SCR_NET;
 		else
-			i2smode = ssi_private->i2s_mode;
+			i2s_net = ssi->i2s_net;
 
-		regmap_update_bits(regs, CCSR_SSI_SCR,
-				CCSR_SSI_SCR_NET | CCSR_SSI_SCR_I2S_MODE_MASK,
-				channels == 1 ? 0 : i2smode);
+		regmap_update_bits(regs, REG_SSI_SCR,
+				   SSI_SCR_I2S_NET_MASK,
+				   channels == 1 ? 0 : i2s_net);
 	}
 
-	/*
-	 * FIXME: The documentation says that SxCCR[WL] should not be
-	 * modified while the SSI is enabled.  The only time this can
-	 * happen is if we're trying to do simultaneous playback and
-	 * capture in asynchronous mode.  Unfortunately, I have been enable
-	 * to get that to work at all on the P1022DS.  Therefore, we don't
-	 * bother to disable/enable the SSI when setting SxCCR[WL], because
-	 * the SSI will stop anyway.  Maybe one day, this will get fixed.
-	 */
-
 	/* In synchronous mode, the SSI uses STCCR for capture */
-	if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ||
-	    ssi_private->cpu_dai_drv.symmetric_rates)
-		regmap_update_bits(regs, CCSR_SSI_STCCR, CCSR_SSI_SxCCR_WL_MASK,
-				wl);
-	else
-		regmap_update_bits(regs, CCSR_SSI_SRCCR, CCSR_SSI_SxCCR_WL_MASK,
-				wl);
+	tx2 = tx || ssi->cpu_dai_drv.symmetric_rates;
+	regmap_update_bits(regs, REG_SSI_SxCCR(tx2), SSI_SxCCR_WL_MASK, wl);
 
 	return 0;
 }
 
 static int fsl_ssi_hw_free(struct snd_pcm_substream *substream,
-		struct snd_soc_dai *cpu_dai)
+			   struct snd_soc_dai *dai)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct fsl_ssi_private *ssi_private =
-		snd_soc_dai_get_drvdata(rtd->cpu_dai);
+	struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
 
-	if (fsl_ssi_is_i2s_master(ssi_private) &&
-			ssi_private->baudclk_streams & BIT(substream->stream)) {
-		clk_disable_unprepare(ssi_private->baudclk);
-		ssi_private->baudclk_streams &= ~BIT(substream->stream);
+	if (fsl_ssi_is_i2s_master(ssi) &&
+	    ssi->baudclk_streams & BIT(substream->stream)) {
+		clk_disable_unprepare(ssi->baudclk);
+		ssi->baudclk_streams &= ~BIT(substream->stream);
 	}
 
 	return 0;
 }
 
 static int _fsl_ssi_set_dai_fmt(struct device *dev,
-				struct fsl_ssi_private *ssi_private,
-				unsigned int fmt)
+				struct fsl_ssi *ssi, unsigned int fmt)
 {
-	struct regmap *regs = ssi_private->regs;
+	struct regmap *regs = ssi->regs;
 	u32 strcr = 0, stcr, srcr, scr, mask;
 	u8 wm;
 
-	ssi_private->dai_fmt = fmt;
+	ssi->dai_fmt = fmt;
 
-	if (fsl_ssi_is_i2s_master(ssi_private) && IS_ERR(ssi_private->baudclk)) {
-		dev_err(dev, "baudclk is missing which is necessary for master mode\n");
+	if (fsl_ssi_is_i2s_master(ssi) && IS_ERR(ssi->baudclk)) {
+		dev_err(dev, "missing baudclk for master mode\n");
 		return -EINVAL;
 	}
 
-	fsl_ssi_setup_reg_vals(ssi_private);
+	fsl_ssi_setup_regvals(ssi);
 
-	regmap_read(regs, CCSR_SSI_SCR, &scr);
-	scr &= ~(CCSR_SSI_SCR_SYN | CCSR_SSI_SCR_I2S_MODE_MASK);
-	scr |= CCSR_SSI_SCR_SYNC_TX_FS;
+	regmap_read(regs, REG_SSI_SCR, &scr);
+	scr &= ~(SSI_SCR_SYN | SSI_SCR_I2S_MODE_MASK);
+	/* Synchronize frame sync clock for TE to avoid data slipping */
+	scr |= SSI_SCR_SYNC_TX_FS;
 
-	mask = CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TFDIR | CCSR_SSI_STCR_TXDIR |
-		CCSR_SSI_STCR_TSCKP | CCSR_SSI_STCR_TFSI | CCSR_SSI_STCR_TFSL |
-		CCSR_SSI_STCR_TEFS;
-	regmap_read(regs, CCSR_SSI_STCR, &stcr);
-	regmap_read(regs, CCSR_SSI_SRCR, &srcr);
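+	/* Read STCR/SRCR and clear the format bits to be rebuilt below */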
+	mask = SSI_STCR_TXBIT0 | SSI_STCR_TFDIR | SSI_STCR_TXDIR |
+	       SSI_STCR_TSCKP | SSI_STCR_TFSI | SSI_STCR_TFSL | SSI_STCR_TEFS;
+	regmap_read(regs, REG_SSI_STCR, &stcr);
+	regmap_read(regs, REG_SSI_SRCR, &srcr);
 	stcr &= ~mask;
 	srcr &= ~mask;
 
-	ssi_private->i2s_mode = CCSR_SSI_SCR_NET;
+	/* Use Network mode as default */
+	ssi->i2s_net = SSI_SCR_NET;
 	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
 	case SND_SOC_DAIFMT_I2S:
-		regmap_update_bits(regs, CCSR_SSI_STCCR,
-				   CCSR_SSI_SxCCR_DC_MASK,
-				   CCSR_SSI_SxCCR_DC(2));
-		regmap_update_bits(regs, CCSR_SSI_SRCCR,
-				   CCSR_SSI_SxCCR_DC_MASK,
-				   CCSR_SSI_SxCCR_DC(2));
+		regmap_update_bits(regs, REG_SSI_STCCR,
+				   SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2));
+		regmap_update_bits(regs, REG_SSI_SRCCR,
+				   SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2));
 		switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
 		case SND_SOC_DAIFMT_CBM_CFS:
 		case SND_SOC_DAIFMT_CBS_CFS:
-			ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_MASTER;
+			ssi->i2s_net |= SSI_SCR_I2S_MODE_MASTER;
 			break;
 		case SND_SOC_DAIFMT_CBM_CFM:
-			ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_SLAVE;
+			ssi->i2s_net |= SSI_SCR_I2S_MODE_SLAVE;
 			break;
 		default:
 			return -EINVAL;
 		}
 
 		/* Data on rising edge of bclk, frame low, 1clk before data */
-		strcr |= CCSR_SSI_STCR_TFSI | CCSR_SSI_STCR_TSCKP |
-			CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TEFS;
+		strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP |
+			 SSI_STCR_TXBIT0 | SSI_STCR_TEFS;
 		break;
 	case SND_SOC_DAIFMT_LEFT_J:
 		/* Data on rising edge of bclk, frame high */
-		strcr |= CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TSCKP;
+		strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP;
 		break;
 	case SND_SOC_DAIFMT_DSP_A:
 		/* Data on rising edge of bclk, frame high, 1clk before data */
-		strcr |= CCSR_SSI_STCR_TFSL | CCSR_SSI_STCR_TSCKP |
-			CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TEFS;
+		strcr |= SSI_STCR_TFSL | SSI_STCR_TSCKP |
+			 SSI_STCR_TXBIT0 | SSI_STCR_TEFS;
 		break;
 	case SND_SOC_DAIFMT_DSP_B:
 		/* Data on rising edge of bclk, frame high */
-		strcr |= CCSR_SSI_STCR_TFSL | CCSR_SSI_STCR_TSCKP |
-			CCSR_SSI_STCR_TXBIT0;
+		strcr |= SSI_STCR_TFSL | SSI_STCR_TSCKP | SSI_STCR_TXBIT0;
 		break;
 	case SND_SOC_DAIFMT_AC97:
-		ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_NORMAL;
+		/* Data on falling edge of bclk, frame high, 1clk before data */
+		ssi->i2s_net |= SSI_SCR_I2S_MODE_NORMAL;
 		break;
 	default:
 		return -EINVAL;
 	}
-	scr |= ssi_private->i2s_mode;
+	scr |= ssi->i2s_net;
 
 	/* DAI clock inversion */
 	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
@@ -1004,16 +949,16 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
 		break;
 	case SND_SOC_DAIFMT_IB_NF:
 		/* Invert bit clock */
-		strcr ^= CCSR_SSI_STCR_TSCKP;
+		strcr ^= SSI_STCR_TSCKP;
 		break;
 	case SND_SOC_DAIFMT_NB_IF:
 		/* Invert frame clock */
-		strcr ^= CCSR_SSI_STCR_TFSI;
+		strcr ^= SSI_STCR_TFSI;
 		break;
 	case SND_SOC_DAIFMT_IB_IF:
 		/* Invert both clocks */
-		strcr ^= CCSR_SSI_STCR_TSCKP;
-		strcr ^= CCSR_SSI_STCR_TFSI;
+		strcr ^= SSI_STCR_TSCKP;
+		strcr ^= SSI_STCR_TFSI;
 		break;
 	default:
 		return -EINVAL;
@@ -1022,123 +967,122 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
 	/* DAI clock master masks */
 	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
 	case SND_SOC_DAIFMT_CBS_CFS:
-		strcr |= CCSR_SSI_STCR_TFDIR | CCSR_SSI_STCR_TXDIR;
-		scr |= CCSR_SSI_SCR_SYS_CLK_EN;
+		/* Output bit and frame sync clocks */
+		strcr |= SSI_STCR_TFDIR | SSI_STCR_TXDIR;
+		scr |= SSI_SCR_SYS_CLK_EN;
 		break;
 	case SND_SOC_DAIFMT_CBM_CFM:
-		scr &= ~CCSR_SSI_SCR_SYS_CLK_EN;
+		/* Input bit or frame sync clocks */
+		scr &= ~SSI_SCR_SYS_CLK_EN;
 		break;
 	case SND_SOC_DAIFMT_CBM_CFS:
-		strcr &= ~CCSR_SSI_STCR_TXDIR;
-		strcr |= CCSR_SSI_STCR_TFDIR;
-		scr &= ~CCSR_SSI_SCR_SYS_CLK_EN;
+		/* Input bit clock but output frame sync clock */
+		strcr &= ~SSI_STCR_TXDIR;
+		strcr |= SSI_STCR_TFDIR;
+		scr &= ~SSI_SCR_SYS_CLK_EN;
 		break;
 	default:
-		if (!fsl_ssi_is_ac97(ssi_private))
+		if (!fsl_ssi_is_ac97(ssi))
 			return -EINVAL;
 	}
 
 	stcr |= strcr;
 	srcr |= strcr;
 
-	if (ssi_private->cpu_dai_drv.symmetric_rates
-			|| fsl_ssi_is_ac97(ssi_private)) {
-		/* Need to clear RXDIR when using SYNC or AC97 mode */
-		srcr &= ~CCSR_SSI_SRCR_RXDIR;
-		scr |= CCSR_SSI_SCR_SYN;
+	/* Set SYN mode and clear RXDIR bit when using SYN or AC97 mode */
+	if (ssi->cpu_dai_drv.symmetric_rates || fsl_ssi_is_ac97(ssi)) {
+		srcr &= ~SSI_SRCR_RXDIR;
+		scr |= SSI_SCR_SYN;
 	}
 
-	regmap_write(regs, CCSR_SSI_STCR, stcr);
-	regmap_write(regs, CCSR_SSI_SRCR, srcr);
-	regmap_write(regs, CCSR_SSI_SCR, scr);
+	regmap_write(regs, REG_SSI_STCR, stcr);
+	regmap_write(regs, REG_SSI_SRCR, srcr);
+	regmap_write(regs, REG_SSI_SCR, scr);
 
-	wm = ssi_private->fifo_watermark;
+	wm = ssi->fifo_watermark;
 
-	regmap_write(regs, CCSR_SSI_SFCSR,
-			CCSR_SSI_SFCSR_TFWM0(wm) | CCSR_SSI_SFCSR_RFWM0(wm) |
-			CCSR_SSI_SFCSR_TFWM1(wm) | CCSR_SSI_SFCSR_RFWM1(wm));
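+	/* Use one watermark value for all four TX/RX FIFO watermarks */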
+	regmap_write(regs, REG_SSI_SFCSR,
+		     SSI_SFCSR_TFWM0(wm) | SSI_SFCSR_RFWM0(wm) |
+		     SSI_SFCSR_TFWM1(wm) | SSI_SFCSR_RFWM1(wm));
 
-	if (ssi_private->use_dual_fifo) {
-		regmap_update_bits(regs, CCSR_SSI_SRCR, CCSR_SSI_SRCR_RFEN1,
-				CCSR_SSI_SRCR_RFEN1);
-		regmap_update_bits(regs, CCSR_SSI_STCR, CCSR_SSI_STCR_TFEN1,
-				CCSR_SSI_STCR_TFEN1);
-		regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_TCH_EN,
-				CCSR_SSI_SCR_TCH_EN);
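+	/* Enable FIFO 1 for both directions and switch to dual FIFO mode */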
+	if (ssi->use_dual_fifo) {
+		regmap_update_bits(regs, REG_SSI_SRCR,
+				   SSI_SRCR_RFEN1, SSI_SRCR_RFEN1);
+		regmap_update_bits(regs, REG_SSI_STCR,
+				   SSI_STCR_TFEN1, SSI_STCR_TFEN1);
+		regmap_update_bits(regs, REG_SSI_SCR,
+				   SSI_SCR_TCH_EN, SSI_SCR_TCH_EN);
 	}
 
 	if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_AC97)
-		fsl_ssi_setup_ac97(ssi_private);
+		fsl_ssi_setup_ac97(ssi);
 
 	return 0;
-
 }
 
 /**
- * fsl_ssi_set_dai_fmt - configure Digital Audio Interface Format.
+ * Configure Digital Audio Interface (DAI) Format
  */
-static int fsl_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
+static int fsl_ssi_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 {
-	struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
+	struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
 
-	return _fsl_ssi_set_dai_fmt(cpu_dai->dev, ssi_private, fmt);
+	/* AC97 configured DAIFMT earlier in the probe() */
+	if (fsl_ssi_is_ac97(ssi))
+		return 0;
+
+	return _fsl_ssi_set_dai_fmt(dai->dev, ssi, fmt);
 }
 
 /**
- * fsl_ssi_set_dai_tdm_slot - set TDM slot number
- *
- * Note: This function can be only called when using SSI as DAI master
+ * Set TDM slot number and slot width
  */
-static int fsl_ssi_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, u32 tx_mask,
-				u32 rx_mask, int slots, int slot_width)
+static int fsl_ssi_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
+				    u32 rx_mask, int slots, int slot_width)
 {
-	struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
-	struct regmap *regs = ssi_private->regs;
+	struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
+	struct regmap *regs = ssi->regs;
 	u32 val;
 
 	/* The word length should be 8, 10, 12, 16, 18, 20, 22 or 24 */
 	if (slot_width & 1 || slot_width < 8 || slot_width > 24) {
-		dev_err(cpu_dai->dev, "invalid slot width: %d\n", slot_width);
+		dev_err(dai->dev, "invalid slot width: %d\n", slot_width);
 		return -EINVAL;
 	}
 
 	/* The slot number should be >= 2 if using Network mode or I2S mode */
-	regmap_read(regs, CCSR_SSI_SCR, &val);
-	val &= CCSR_SSI_SCR_I2S_MODE_MASK | CCSR_SSI_SCR_NET;
+	regmap_read(regs, REG_SSI_SCR, &val);
+	val &= SSI_SCR_I2S_MODE_MASK | SSI_SCR_NET;
 	if (val && slots < 2) {
-		dev_err(cpu_dai->dev, "slot number should be >= 2 in I2S or NET\n");
+		dev_err(dai->dev, "slot number should be >= 2 in I2S or NET\n");
 		return -EINVAL;
 	}
 
-	regmap_update_bits(regs, CCSR_SSI_STCCR, CCSR_SSI_SxCCR_DC_MASK,
-			CCSR_SSI_SxCCR_DC(slots));
-	regmap_update_bits(regs, CCSR_SSI_SRCCR, CCSR_SSI_SxCCR_DC_MASK,
-			CCSR_SSI_SxCCR_DC(slots));
+	regmap_update_bits(regs, REG_SSI_STCCR,
+			   SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots));
+	regmap_update_bits(regs, REG_SSI_SRCCR,
+			   SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots));
 
-	/* The register SxMSKs needs SSI to provide essential clock due to
-	 * hardware design. So we here temporarily enable SSI to set them.
-	 */
-	regmap_read(regs, CCSR_SSI_SCR, &val);
-	val &= CCSR_SSI_SCR_SSIEN;
-	regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_SSIEN,
-			CCSR_SSI_SCR_SSIEN);
+	/* Save SSIEN bit of the SCR register */
+	regmap_read(regs, REG_SSI_SCR, &val);
+	val &= SSI_SCR_SSIEN;
+	/* Temporarily enable SSI to allow SxMSKs to be configurable */
+	regmap_update_bits(regs, REG_SSI_SCR, SSI_SCR_SSIEN, SSI_SCR_SSIEN);
 
-	regmap_write(regs, CCSR_SSI_STMSK, ~tx_mask);
-	regmap_write(regs, CCSR_SSI_SRMSK, ~rx_mask);
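+	/* SxMSK masks a slot with a '1' bit, so invert the active-slot masks */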
+	regmap_write(regs, REG_SSI_STMSK, ~tx_mask);
+	regmap_write(regs, REG_SSI_SRMSK, ~rx_mask);
 
-	regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_SSIEN, val);
+	/* Restore the value of SSIEN bit */
+	regmap_update_bits(regs, REG_SSI_SCR, SSI_SCR_SSIEN, val);
 
-	ssi_private->slot_width = slot_width;
-	ssi_private->slots = slots;
+	ssi->slot_width = slot_width;
+	ssi->slots = slots;
 
 	return 0;
 }
 
 /**
- * fsl_ssi_trigger: start and stop the DMA transfer.
- *
- * This function is called by ALSA to start, stop, pause, and resume the DMA
- * transfer of data.
+ * Start or stop SSI and corresponding DMA transaction.
  *
  * The DMA channel is in external master start and pause mode, which
  * means the SSI completely controls the flow of data.
@@ -1147,37 +1091,38 @@ static int fsl_ssi_trigger(struct snd_pcm_substream *substream, int cmd,
 			   struct snd_soc_dai *dai)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(rtd->cpu_dai);
-	struct regmap *regs = ssi_private->regs;
+	struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+	struct regmap *regs = ssi->regs;
 
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
 	case SNDRV_PCM_TRIGGER_RESUME:
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-			fsl_ssi_tx_config(ssi_private, true);
+			fsl_ssi_tx_config(ssi, true);
 		else
-			fsl_ssi_rx_config(ssi_private, true);
+			fsl_ssi_rx_config(ssi, true);
 		break;
 
 	case SNDRV_PCM_TRIGGER_STOP:
 	case SNDRV_PCM_TRIGGER_SUSPEND:
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-			fsl_ssi_tx_config(ssi_private, false);
+			fsl_ssi_tx_config(ssi, false);
 		else
-			fsl_ssi_rx_config(ssi_private, false);
+			fsl_ssi_rx_config(ssi, false);
 		break;
 
 	default:
 		return -EINVAL;
 	}
 
-	if (fsl_ssi_is_ac97(ssi_private)) {
+	/* Clear corresponding FIFO */
+	if (fsl_ssi_is_ac97(ssi)) {
 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-			regmap_write(regs, CCSR_SSI_SOR, CCSR_SSI_SOR_TX_CLR);
+			regmap_write(regs, REG_SSI_SOR, SSI_SOR_TX_CLR);
 		else
-			regmap_write(regs, CCSR_SSI_SOR, CCSR_SSI_SOR_RX_CLR);
+			regmap_write(regs, REG_SSI_SOR, SSI_SOR_RX_CLR);
 	}
 
 	return 0;
@@ -1185,27 +1130,26 @@ static int fsl_ssi_trigger(struct snd_pcm_substream *substream, int cmd,
 
 static int fsl_ssi_dai_probe(struct snd_soc_dai *dai)
 {
-	struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(dai);
+	struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
 
-	if (ssi_private->soc->imx && ssi_private->use_dma) {
-		dai->playback_dma_data = &ssi_private->dma_params_tx;
-		dai->capture_dma_data = &ssi_private->dma_params_rx;
+	if (ssi->soc->imx && ssi->use_dma) {
+		dai->playback_dma_data = &ssi->dma_params_tx;
+		dai->capture_dma_data = &ssi->dma_params_rx;
 	}
 
 	return 0;
 }
 
 static const struct snd_soc_dai_ops fsl_ssi_dai_ops = {
-	.startup	= fsl_ssi_startup,
-	.shutdown       = fsl_ssi_shutdown,
-	.hw_params	= fsl_ssi_hw_params,
-	.hw_free	= fsl_ssi_hw_free,
-	.set_fmt	= fsl_ssi_set_dai_fmt,
-	.set_tdm_slot	= fsl_ssi_set_dai_tdm_slot,
-	.trigger	= fsl_ssi_trigger,
+	.startup = fsl_ssi_startup,
+	.shutdown = fsl_ssi_shutdown,
+	.hw_params = fsl_ssi_hw_params,
+	.hw_free = fsl_ssi_hw_free,
+	.set_fmt = fsl_ssi_set_dai_fmt,
+	.set_tdm_slot = fsl_ssi_set_dai_tdm_slot,
+	.trigger = fsl_ssi_trigger,
 };
 
-/* Template for the CPU dai driver structure */
 static struct snd_soc_dai_driver fsl_ssi_dai_template = {
 	.probe = fsl_ssi_dai_probe,
 	.playback = {
@@ -1226,7 +1170,7 @@ static struct snd_soc_dai_driver fsl_ssi_dai_template = {
 };
 
 static const struct snd_soc_component_driver fsl_ssi_component = {
-	.name		= "fsl-ssi",
+	.name = "fsl-ssi",
 };
 
 static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
@@ -1237,23 +1181,23 @@ static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
 		.channels_min = 2,
 		.channels_max = 2,
 		.rates = SNDRV_PCM_RATE_8000_48000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.formats = SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_S20,
 	},
 	.capture = {
 		.stream_name = "AC97 Capture",
 		.channels_min = 2,
 		.channels_max = 2,
 		.rates = SNDRV_PCM_RATE_48000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		/* 16-bit capture is broken (errata ERR003778) */
+		.formats = SNDRV_PCM_FMTBIT_S20,
 	},
 	.ops = &fsl_ssi_dai_ops,
 };
 
-
-static struct fsl_ssi_private *fsl_ac97_data;
+static struct fsl_ssi *fsl_ac97_data;
 
 static void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
-		unsigned short val)
+			       unsigned short val)
 {
 	struct regmap *regs = fsl_ac97_data->regs;
 	unsigned int lreg;
@@ -1273,13 +1217,13 @@ static void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
 	}
 
 	lreg = reg <<  12;
-	regmap_write(regs, CCSR_SSI_SACADD, lreg);
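+	/* SACADD holds the AC97 register index as the command address */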
+	regmap_write(regs, REG_SSI_SACADD, lreg);
 
 	lval = val << 4;
-	regmap_write(regs, CCSR_SSI_SACDAT, lval);
+	regmap_write(regs, REG_SSI_SACDAT, lval);
 
-	regmap_update_bits(regs, CCSR_SSI_SACNT, CCSR_SSI_SACNT_RDWR_MASK,
-			CCSR_SSI_SACNT_WR);
+	regmap_update_bits(regs, REG_SSI_SACNT,
+			   SSI_SACNT_RDWR_MASK, SSI_SACNT_WR);
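+	/* Give the command time to complete over the AC-link */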
 	udelay(100);
 
 	clk_disable_unprepare(fsl_ac97_data->clk);
@@ -1289,10 +1233,9 @@ ret_unlock:
 }
 
 static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
-		unsigned short reg)
+					unsigned short reg)
 {
 	struct regmap *regs = fsl_ac97_data->regs;
-
 	unsigned short val = 0;
 	u32 reg_val;
 	unsigned int lreg;
@@ -1302,19 +1245,18 @@ static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
 
 	ret = clk_prepare_enable(fsl_ac97_data->clk);
 	if (ret) {
-		pr_err("ac97 read clk_prepare_enable failed: %d\n",
-			ret);
+		pr_err("ac97 read clk_prepare_enable failed: %d\n", ret);
 		goto ret_unlock;
 	}
 
 	lreg = (reg & 0x7f) <<  12;
-	regmap_write(regs, CCSR_SSI_SACADD, lreg);
-	regmap_update_bits(regs, CCSR_SSI_SACNT, CCSR_SSI_SACNT_RDWR_MASK,
-			CCSR_SSI_SACNT_RD);
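+	/* Select the register via SACADD, then issue a read command */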
+	regmap_write(regs, REG_SSI_SACADD, lreg);
+	regmap_update_bits(regs, REG_SSI_SACNT,
+			   SSI_SACNT_RDWR_MASK, SSI_SACNT_RD);
 
 	udelay(100);
 
-	regmap_read(regs, CCSR_SSI_SACDAT, &reg_val);
+	regmap_read(regs, REG_SSI_SACDAT, &reg_val);
 	val = (reg_val >> 4) & 0xffff;
 
 	clk_disable_unprepare(fsl_ac97_data->clk);
@@ -1325,8 +1267,8 @@ ret_unlock:
 }
 
 static struct snd_ac97_bus_ops fsl_ssi_ac97_ops = {
-	.read		= fsl_ssi_ac97_read,
-	.write		= fsl_ssi_ac97_write,
+	.read = fsl_ssi_ac97_read,
+	.write = fsl_ssi_ac97_write,
 };
 
 /**
@@ -1341,70 +1283,67 @@ static void make_lowercase(char *s)
 }
 
 static int fsl_ssi_imx_probe(struct platform_device *pdev,
-		struct fsl_ssi_private *ssi_private, void __iomem *iomem)
+			     struct fsl_ssi *ssi, void __iomem *iomem)
 {
 	struct device_node *np = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
 	u32 dmas[4];
 	int ret;
 
-	if (ssi_private->has_ipg_clk_name)
-		ssi_private->clk = devm_clk_get(&pdev->dev, "ipg");
+	/* Backward compatible for a DT without ipg clock name assigned */
+	if (ssi->has_ipg_clk_name)
+		ssi->clk = devm_clk_get(dev, "ipg");
 	else
-		ssi_private->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(ssi_private->clk)) {
-		ret = PTR_ERR(ssi_private->clk);
-		dev_err(&pdev->dev, "could not get clock: %d\n", ret);
+		ssi->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(ssi->clk)) {
+		ret = PTR_ERR(ssi->clk);
+		dev_err(dev, "failed to get clock: %d\n", ret);
 		return ret;
 	}
 
-	if (!ssi_private->has_ipg_clk_name) {
-		ret = clk_prepare_enable(ssi_private->clk);
+	/* Enable the clock since regmap will not handle it in this case */
+	if (!ssi->has_ipg_clk_name) {
+		ret = clk_prepare_enable(ssi->clk);
 		if (ret) {
-			dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
+			dev_err(dev, "clk_prepare_enable failed: %d\n", ret);
 			return ret;
 		}
 	}
 
-	/* For those SLAVE implementations, we ignore non-baudclk cases
-	 * and, instead, abandon MASTER mode that needs baud clock.
-	 */
-	ssi_private->baudclk = devm_clk_get(&pdev->dev, "baud");
-	if (IS_ERR(ssi_private->baudclk))
-		dev_dbg(&pdev->dev, "could not get baud clock: %ld\n",
-			 PTR_ERR(ssi_private->baudclk));
+	/* Do not error out for slave cases that live without a baud clock */
+	ssi->baudclk = devm_clk_get(dev, "baud");
+	if (IS_ERR(ssi->baudclk))
+		dev_dbg(dev, "failed to get baud clock: %ld\n",
+			 PTR_ERR(ssi->baudclk));
 
-	ssi_private->dma_params_tx.maxburst = ssi_private->dma_maxburst;
-	ssi_private->dma_params_rx.maxburst = ssi_private->dma_maxburst;
-	ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0;
-	ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0;
+	ssi->dma_params_tx.maxburst = ssi->dma_maxburst;
+	ssi->dma_params_rx.maxburst = ssi->dma_maxburst;
+	ssi->dma_params_tx.addr = ssi->ssi_phys + REG_SSI_STX0;
+	ssi->dma_params_rx.addr = ssi->ssi_phys + REG_SSI_SRX0;
 
+	/* Set to dual FIFO mode according to the SDMA script */
 	ret = of_property_read_u32_array(np, "dmas", dmas, 4);
-	if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
-		ssi_private->use_dual_fifo = true;
-		/* When using dual fifo mode, we need to keep watermark
-		 * as even numbers due to dma script limitation.
+	if (ssi->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
+		ssi->use_dual_fifo = true;
+		/*
+		 * Use even numbers to avoid channel swap due to SDMA
+		 * script design
 		 */
-		ssi_private->dma_params_tx.maxburst &= ~0x1;
-		ssi_private->dma_params_rx.maxburst &= ~0x1;
+		ssi->dma_params_tx.maxburst &= ~0x1;
+		ssi->dma_params_rx.maxburst &= ~0x1;
 	}
 
-	if (!ssi_private->use_dma) {
-
+	if (!ssi->use_dma) {
 		/*
-		 * Some boards use an incompatible codec. To get it
-		 * working, we are using imx-fiq-pcm-audio, that
-		 * can handle those codecs. DMA is not possible in this
-		 * situation.
+		 * Some boards use an incompatible codec. Use imx-fiq-pcm-audio
+		 * to get it working, as DMA is not possible in this situation.
 		 */
+		ssi->fiq_params.irq = ssi->irq;
+		ssi->fiq_params.base = iomem;
+		ssi->fiq_params.dma_params_rx = &ssi->dma_params_rx;
+		ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx;
 
-		ssi_private->fiq_params.irq = ssi_private->irq;
-		ssi_private->fiq_params.base = iomem;
-		ssi_private->fiq_params.dma_params_rx =
-			&ssi_private->dma_params_rx;
-		ssi_private->fiq_params.dma_params_tx =
-			&ssi_private->dma_params_tx;
-
-		ret = imx_pcm_fiq_init(pdev, &ssi_private->fiq_params);
+		ret = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
 		if (ret)
 			goto error_pcm;
 	} else {
@@ -1416,26 +1355,26 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev,
 	return 0;
 
 error_pcm:
+	if (!ssi->has_ipg_clk_name)
+		clk_disable_unprepare(ssi->clk);
 
-	if (!ssi_private->has_ipg_clk_name)
-		clk_disable_unprepare(ssi_private->clk);
 	return ret;
 }
 
-static void fsl_ssi_imx_clean(struct platform_device *pdev,
-		struct fsl_ssi_private *ssi_private)
+static void fsl_ssi_imx_clean(struct platform_device *pdev, struct fsl_ssi *ssi)
 {
-	if (!ssi_private->use_dma)
+	if (!ssi->use_dma)
 		imx_pcm_fiq_exit(pdev);
-	if (!ssi_private->has_ipg_clk_name)
-		clk_disable_unprepare(ssi_private->clk);
+	if (!ssi->has_ipg_clk_name)
+		clk_disable_unprepare(ssi->clk);
 }
 
 static int fsl_ssi_probe(struct platform_device *pdev)
 {
-	struct fsl_ssi_private *ssi_private;
+	struct fsl_ssi *ssi;
 	int ret = 0;
 	struct device_node *np = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
 	const struct of_device_id *of_id;
 	const char *p, *sprop;
 	const uint32_t *iprop;
@@ -1444,185 +1383,159 @@ static int fsl_ssi_probe(struct platform_device *pdev)
 	char name[64];
 	struct regmap_config regconfig = fsl_ssi_regconfig;
 
-	of_id = of_match_device(fsl_ssi_ids, &pdev->dev);
+	of_id = of_match_device(fsl_ssi_ids, dev);
 	if (!of_id || !of_id->data)
 		return -EINVAL;
 
-	ssi_private = devm_kzalloc(&pdev->dev, sizeof(*ssi_private),
-			GFP_KERNEL);
-	if (!ssi_private)
+	ssi = devm_kzalloc(dev, sizeof(*ssi), GFP_KERNEL);
+	if (!ssi)
 		return -ENOMEM;
 
-	ssi_private->soc = of_id->data;
-	ssi_private->dev = &pdev->dev;
+	ssi->soc = of_id->data;
+	ssi->dev = dev;
 
+	/* Check if being used in AC97 mode */
 	sprop = of_get_property(np, "fsl,mode", NULL);
 	if (sprop) {
 		if (!strcmp(sprop, "ac97-slave"))
-			ssi_private->dai_fmt = SND_SOC_DAIFMT_AC97;
+			ssi->dai_fmt = SND_SOC_DAIFMT_AC97;
 	}
 
-	ssi_private->use_dma = !of_property_read_bool(np,
-			"fsl,fiq-stream-filter");
-
-	if (fsl_ssi_is_ac97(ssi_private)) {
-		memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_ac97_dai,
-				sizeof(fsl_ssi_ac97_dai));
+	/* Select DMA or FIQ */
+	ssi->use_dma = !of_property_read_bool(np, "fsl,fiq-stream-filter");
 
-		fsl_ac97_data = ssi_private;
+	if (fsl_ssi_is_ac97(ssi)) {
+		memcpy(&ssi->cpu_dai_drv, &fsl_ssi_ac97_dai,
+		       sizeof(fsl_ssi_ac97_dai));
+		fsl_ac97_data = ssi;
 	} else {
-		/* Initialize this copy of the CPU DAI driver structure */
-		memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
+		memcpy(&ssi->cpu_dai_drv, &fsl_ssi_dai_template,
 		       sizeof(fsl_ssi_dai_template));
 	}
-	ssi_private->cpu_dai_drv.name = dev_name(&pdev->dev);
+	ssi->cpu_dai_drv.name = dev_name(dev);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	iomem = devm_ioremap_resource(&pdev->dev, res);
+	iomem = devm_ioremap_resource(dev, res);
 	if (IS_ERR(iomem))
 		return PTR_ERR(iomem);
-	ssi_private->ssi_phys = res->start;
+	ssi->ssi_phys = res->start;
 
-	if (ssi_private->soc->imx21regs) {
-		/*
-		 * According to datasheet imx21-class SSI
-		 * don't have SACC{ST,EN,DIS} regs.
-		 */
-		regconfig.max_register = CCSR_SSI_SRMSK;
+	if (ssi->soc->imx21regs) {
+		/* No SACC{ST,EN,DIS} regs in imx21-class SSI */
+		regconfig.max_register = REG_SSI_SRMSK;
 		regconfig.num_reg_defaults_raw =
-			CCSR_SSI_SRMSK / sizeof(uint32_t) + 1;
+			REG_SSI_SRMSK / sizeof(uint32_t) + 1;
 	}
 
 	ret = of_property_match_string(np, "clock-names", "ipg");
 	if (ret < 0) {
-		ssi_private->has_ipg_clk_name = false;
-		ssi_private->regs = devm_regmap_init_mmio(&pdev->dev, iomem,
-			&regconfig);
+		ssi->has_ipg_clk_name = false;
+		ssi->regs = devm_regmap_init_mmio(dev, iomem, &regconfig);
 	} else {
-		ssi_private->has_ipg_clk_name = true;
-		ssi_private->regs = devm_regmap_init_mmio_clk(&pdev->dev,
-			"ipg", iomem, &regconfig);
+		ssi->has_ipg_clk_name = true;
+		ssi->regs = devm_regmap_init_mmio_clk(dev, "ipg", iomem,
+						      &regconfig);
 	}
-	if (IS_ERR(ssi_private->regs)) {
-		dev_err(&pdev->dev, "Failed to init register map\n");
-		return PTR_ERR(ssi_private->regs);
+	if (IS_ERR(ssi->regs)) {
+		dev_err(dev, "failed to init register map\n");
+		return PTR_ERR(ssi->regs);
 	}
 
-	ssi_private->irq = platform_get_irq(pdev, 0);
-	if (ssi_private->irq < 0) {
-		dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
-		return ssi_private->irq;
+	ssi->irq = platform_get_irq(pdev, 0);
+	if (ssi->irq < 0) {
+		dev_err(dev, "no irq for node %s\n", pdev->name);
+		return ssi->irq;
 	}
 
-	/* Are the RX and the TX clocks locked? */
+	/* Set software limitations for synchronous mode */
 	if (!of_find_property(np, "fsl,ssi-asynchronous", NULL)) {
-		if (!fsl_ssi_is_ac97(ssi_private))
-			ssi_private->cpu_dai_drv.symmetric_rates = 1;
+		if (!fsl_ssi_is_ac97(ssi)) {
+			ssi->cpu_dai_drv.symmetric_rates = 1;
+			ssi->cpu_dai_drv.symmetric_samplebits = 1;
+		}
 
-		ssi_private->cpu_dai_drv.symmetric_channels = 1;
-		ssi_private->cpu_dai_drv.symmetric_samplebits = 1;
+		ssi->cpu_dai_drv.symmetric_channels = 1;
 	}
 
-	/* Determine the FIFO depth. */
+	/* Fetch FIFO depth; Set to 8 for older DT without this property */
 	iprop = of_get_property(np, "fsl,fifo-depth", NULL);
 	if (iprop)
-		ssi_private->fifo_depth = be32_to_cpup(iprop);
+		ssi->fifo_depth = be32_to_cpup(iprop);
 	else
-                /* Older 8610 DTs didn't have the fifo-depth property */
-		ssi_private->fifo_depth = 8;
+		ssi->fifo_depth = 8;
 
 	/*
-	 * Set the watermark for transmit FIFO 0 and receive FIFO 0. We don't
-	 * use FIFO 1 but set the watermark appropriately nontheless.
-	 * We program the transmit water to signal a DMA transfer
-	 * if there are N elements left in the FIFO. For chips with 15-deep
-	 * FIFOs, set watermark to 8.  This allows the SSI to operate at a
-	 * high data rate without channel slipping. Behavior is unchanged
-	 * for the older chips with a fifo depth of only 8.  A value of 4
-	 * might be appropriate for the older chips, but is left at
-	 * fifo_depth-2 until sombody has a chance to test.
+	 * Configure TX and RX DMA watermarks -- when to send a DMA request
 	 *
-	 * We set the watermark on the same level as the DMA burstsize.  For
-	 * fiq it is probably better to use the biggest possible watermark
-	 * size.
+	 * Values should be tested to avoid FIFO underrun/overrun. Set maxburst
+	 * to fifo_watermark to maximize DMA transactions and reduce overhead.
 	 */
-	switch (ssi_private->fifo_depth) {
+	switch (ssi->fifo_depth) {
 	case 15:
 		/*
-		 * 2 samples is not enough when running at high data
-		 * rates (like 48kHz @ 16 bits/channel, 16 channels)
-		 * 8 seems to split things evenly and leave enough time
-		 * for the DMA to fill the FIFO before it's over/under
-		 * run.
+		 * Set to 8 as a balanced configuration -- When TX FIFO has 8
+		 * empty slots, send a DMA request to fill these 8 slots. The
+		 * remaining 7 slots should give the DMA enough time to finish
+		 * the transaction before the TX FIFO underruns; same for RX.
+		 *
+		 * Tested with cases running at 48kHz @ 16 bits x 16 channels
 		 */
-		ssi_private->fifo_watermark = 8;
-		ssi_private->dma_maxburst = 8;
+		ssi->fifo_watermark = 8;
+		ssi->dma_maxburst = 8;
 		break;
 	case 8:
 	default:
-		/*
-		 * maintain old behavior for older chips.
-		 * Keeping it the same because I don't have an older
-		 * board to test with.
-		 * I suspect this could be changed to be something to
-		 * leave some more space in the fifo.
-		 */
-		ssi_private->fifo_watermark = ssi_private->fifo_depth - 2;
-		ssi_private->dma_maxburst = ssi_private->fifo_depth - 2;
+		/* Safely use old watermark configurations for older chips */
+		ssi->fifo_watermark = ssi->fifo_depth - 2;
+		ssi->dma_maxburst = ssi->fifo_depth - 2;
 		break;
 	}
 
-	dev_set_drvdata(&pdev->dev, ssi_private);
+	dev_set_drvdata(dev, ssi);
 
-	if (ssi_private->soc->imx) {
-		ret = fsl_ssi_imx_probe(pdev, ssi_private, iomem);
+	if (ssi->soc->imx) {
+		ret = fsl_ssi_imx_probe(pdev, ssi, iomem);
 		if (ret)
 			return ret;
 	}
 
-	if (fsl_ssi_is_ac97(ssi_private)) {
-		mutex_init(&ssi_private->ac97_reg_lock);
+	if (fsl_ssi_is_ac97(ssi)) {
+		mutex_init(&ssi->ac97_reg_lock);
 		ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
 		if (ret) {
-			dev_err(&pdev->dev, "could not set AC'97 ops\n");
+			dev_err(dev, "failed to set AC'97 ops\n");
 			goto error_ac97_ops;
 		}
 	}
 
-	ret = devm_snd_soc_register_component(&pdev->dev, &fsl_ssi_component,
-					      &ssi_private->cpu_dai_drv, 1);
+	ret = devm_snd_soc_register_component(dev, &fsl_ssi_component,
+					      &ssi->cpu_dai_drv, 1);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
+		dev_err(dev, "failed to register DAI: %d\n", ret);
 		goto error_asoc_register;
 	}
 
-	if (ssi_private->use_dma) {
-		ret = devm_request_irq(&pdev->dev, ssi_private->irq,
-					fsl_ssi_isr, 0, dev_name(&pdev->dev),
-					ssi_private);
+	if (ssi->use_dma) {
+		ret = devm_request_irq(dev, ssi->irq, fsl_ssi_isr, 0,
+				       dev_name(dev), ssi);
 		if (ret < 0) {
-			dev_err(&pdev->dev, "could not claim irq %u\n",
-					ssi_private->irq);
+			dev_err(dev, "failed to claim irq %u\n", ssi->irq);
 			goto error_asoc_register;
 		}
 	}
 
-	ret = fsl_ssi_debugfs_create(&ssi_private->dbg_stats, &pdev->dev);
+	ret = fsl_ssi_debugfs_create(&ssi->dbg_stats, dev);
 	if (ret)
 		goto error_asoc_register;
 
-	/*
-	 * If codec-handle property is missing from SSI node, we assume
-	 * that the machine driver uses new binding which does not require
-	 * SSI driver to trigger machine driver's probe.
-	 */
+	/* Bypass it if using newer DT bindings of ASoC machine drivers */
 	if (!of_get_property(np, "codec-handle", NULL))
 		goto done;
 
-	/* Trigger the machine driver's probe function.  The platform driver
-	 * name of the machine driver is taken from /compatible property of the
-	 * device tree.  We also pass the address of the CPU DAI driver
-	 * structure.
+	/*
+	 * Backward compatible for older bindings by manually triggering the
+	 * machine driver's probe(), using the /compatible property (plus the
+	 * address of the CPU DAI driver structure) as the machine driver name.
 	 */
 	sprop = of_get_property(of_find_node_by_path("/"), "compatible", NULL);
 	/* Sometimes the compatible name has a "fsl," prefix, so we strip it. */
@@ -1632,34 +1545,31 @@ static int fsl_ssi_probe(struct platform_device *pdev)
 	snprintf(name, sizeof(name), "snd-soc-%s", sprop);
 	make_lowercase(name);
 
-	ssi_private->pdev =
-		platform_device_register_data(&pdev->dev, name, 0, NULL, 0);
-	if (IS_ERR(ssi_private->pdev)) {
-		ret = PTR_ERR(ssi_private->pdev);
-		dev_err(&pdev->dev, "failed to register platform: %d\n", ret);
+	ssi->pdev = platform_device_register_data(dev, name, 0, NULL, 0);
+	if (IS_ERR(ssi->pdev)) {
+		ret = PTR_ERR(ssi->pdev);
+		dev_err(dev, "failed to register platform: %d\n", ret);
 		goto error_sound_card;
 	}
 
 done:
-	if (ssi_private->dai_fmt)
-		_fsl_ssi_set_dai_fmt(&pdev->dev, ssi_private,
-				     ssi_private->dai_fmt);
+	if (ssi->dai_fmt)
+		_fsl_ssi_set_dai_fmt(dev, ssi, ssi->dai_fmt);
 
-	if (fsl_ssi_is_ac97(ssi_private)) {
+	if (fsl_ssi_is_ac97(ssi)) {
 		u32 ssi_idx;
 
 		ret = of_property_read_u32(np, "cell-index", &ssi_idx);
 		if (ret) {
-			dev_err(&pdev->dev, "cannot get SSI index property\n");
+			dev_err(dev, "failed to get SSI index property\n");
 			goto error_sound_card;
 		}
 
-		ssi_private->pdev =
-			platform_device_register_data(NULL,
-					"ac97-codec", ssi_idx, NULL, 0);
-		if (IS_ERR(ssi_private->pdev)) {
-			ret = PTR_ERR(ssi_private->pdev);
-			dev_err(&pdev->dev,
+		ssi->pdev = platform_device_register_data(NULL, "ac97-codec",
+							  ssi_idx, NULL, 0);
+		if (IS_ERR(ssi->pdev)) {
+			ret = PTR_ERR(ssi->pdev);
+			dev_err(dev,
 				"failed to register AC97 codec platform: %d\n",
 				ret);
 			goto error_sound_card;
@@ -1669,37 +1579,35 @@ done:
 	return 0;
 
 error_sound_card:
-	fsl_ssi_debugfs_remove(&ssi_private->dbg_stats);
-
+	fsl_ssi_debugfs_remove(&ssi->dbg_stats);
 error_asoc_register:
-	if (fsl_ssi_is_ac97(ssi_private))
+	if (fsl_ssi_is_ac97(ssi))
 		snd_soc_set_ac97_ops(NULL);
-
 error_ac97_ops:
-	if (fsl_ssi_is_ac97(ssi_private))
-		mutex_destroy(&ssi_private->ac97_reg_lock);
+	if (fsl_ssi_is_ac97(ssi))
+		mutex_destroy(&ssi->ac97_reg_lock);
 
-	if (ssi_private->soc->imx)
-		fsl_ssi_imx_clean(pdev, ssi_private);
+	if (ssi->soc->imx)
+		fsl_ssi_imx_clean(pdev, ssi);
 
 	return ret;
 }
 
 static int fsl_ssi_remove(struct platform_device *pdev)
 {
-	struct fsl_ssi_private *ssi_private = dev_get_drvdata(&pdev->dev);
+	struct fsl_ssi *ssi = dev_get_drvdata(&pdev->dev);
 
-	fsl_ssi_debugfs_remove(&ssi_private->dbg_stats);
+	fsl_ssi_debugfs_remove(&ssi->dbg_stats);
 
-	if (ssi_private->pdev)
-		platform_device_unregister(ssi_private->pdev);
+	if (ssi->pdev)
+		platform_device_unregister(ssi->pdev);
 
-	if (ssi_private->soc->imx)
-		fsl_ssi_imx_clean(pdev, ssi_private);
+	if (ssi->soc->imx)
+		fsl_ssi_imx_clean(pdev, ssi);
 
-	if (fsl_ssi_is_ac97(ssi_private)) {
+	if (fsl_ssi_is_ac97(ssi)) {
 		snd_soc_set_ac97_ops(NULL);
-		mutex_destroy(&ssi_private->ac97_reg_lock);
+		mutex_destroy(&ssi->ac97_reg_lock);
 	}
 
 	return 0;
@@ -1708,13 +1616,11 @@ static int fsl_ssi_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int fsl_ssi_suspend(struct device *dev)
 {
-	struct fsl_ssi_private *ssi_private = dev_get_drvdata(dev);
-	struct regmap *regs = ssi_private->regs;
+	struct fsl_ssi *ssi = dev_get_drvdata(dev);
+	struct regmap *regs = ssi->regs;
 
-	regmap_read(regs, CCSR_SSI_SFCSR,
-			&ssi_private->regcache_sfcsr);
-	regmap_read(regs, CCSR_SSI_SACNT,
-			&ssi_private->regcache_sacnt);
+	regmap_read(regs, REG_SSI_SFCSR, &ssi->regcache_sfcsr);
+	regmap_read(regs, REG_SSI_SACNT, &ssi->regcache_sacnt);
 
 	regcache_cache_only(regs, true);
 	regcache_mark_dirty(regs);
@@ -1724,17 +1630,16 @@ static int fsl_ssi_suspend(struct device *dev)
 
 static int fsl_ssi_resume(struct device *dev)
 {
-	struct fsl_ssi_private *ssi_private = dev_get_drvdata(dev);
-	struct regmap *regs = ssi_private->regs;
+	struct fsl_ssi *ssi = dev_get_drvdata(dev);
+	struct regmap *regs = ssi->regs;
 
 	regcache_cache_only(regs, false);
 
-	regmap_update_bits(regs, CCSR_SSI_SFCSR,
-			CCSR_SSI_SFCSR_RFWM1_MASK | CCSR_SSI_SFCSR_TFWM1_MASK |
-			CCSR_SSI_SFCSR_RFWM0_MASK | CCSR_SSI_SFCSR_TFWM0_MASK,
-			ssi_private->regcache_sfcsr);
-	regmap_write(regs, CCSR_SSI_SACNT,
-			ssi_private->regcache_sacnt);
+	regmap_update_bits(regs, REG_SSI_SFCSR,
+			   SSI_SFCSR_RFWM1_MASK | SSI_SFCSR_TFWM1_MASK |
+			   SSI_SFCSR_RFWM0_MASK | SSI_SFCSR_TFWM0_MASK,
+			   ssi->regcache_sfcsr);
+	regmap_write(regs, REG_SSI_SACNT, ssi->regcache_sacnt);
 
 	return regcache_sync(regs);
 }
diff --git a/sound/soc/fsl/fsl_ssi.h b/sound/soc/fsl/fsl_ssi.h
index 506510540d0acbf0c977bd730e5b30a4cc83d3b7..de2fdc5db72604f752b55a6d7f993bc0509f0036 100644
--- a/sound/soc/fsl/fsl_ssi.h
+++ b/sound/soc/fsl/fsl_ssi.h
@@ -1,5 +1,5 @@
 /*
- * fsl_ssi.h - ALSA SSI interface for the Freescale MPC8610 SoC
+ * fsl_ssi.h - ALSA SSI interface for the Freescale MPC8610 and i.MX SoCs
  *
  * Author: Timur Tabi <timur@freescale.com>
  *
@@ -12,198 +12,261 @@
 #ifndef _MPC8610_I2S_H
 #define _MPC8610_I2S_H
 
-/* SSI registers */
-#define CCSR_SSI_STX0			0x00
-#define CCSR_SSI_STX1			0x04
-#define CCSR_SSI_SRX0			0x08
-#define CCSR_SSI_SRX1			0x0c
-#define CCSR_SSI_SCR			0x10
-#define CCSR_SSI_SISR			0x14
-#define CCSR_SSI_SIER			0x18
-#define CCSR_SSI_STCR			0x1c
-#define CCSR_SSI_SRCR			0x20
-#define CCSR_SSI_STCCR			0x24
-#define CCSR_SSI_SRCCR			0x28
-#define CCSR_SSI_SFCSR			0x2c
-#define CCSR_SSI_STR			0x30
-#define CCSR_SSI_SOR			0x34
-#define CCSR_SSI_SACNT			0x38
-#define CCSR_SSI_SACADD			0x3c
-#define CCSR_SSI_SACDAT			0x40
-#define CCSR_SSI_SATAG			0x44
-#define CCSR_SSI_STMSK			0x48
-#define CCSR_SSI_SRMSK			0x4c
-#define CCSR_SSI_SACCST			0x50
-#define CCSR_SSI_SACCEN			0x54
-#define CCSR_SSI_SACCDIS		0x58
+#define RX 0
+#define TX 1
 
-#define CCSR_SSI_SCR_SYNC_TX_FS		0x00001000
-#define CCSR_SSI_SCR_RFR_CLK_DIS	0x00000800
-#define CCSR_SSI_SCR_TFR_CLK_DIS	0x00000400
-#define CCSR_SSI_SCR_TCH_EN		0x00000100
-#define CCSR_SSI_SCR_SYS_CLK_EN		0x00000080
-#define CCSR_SSI_SCR_I2S_MODE_MASK	0x00000060
-#define CCSR_SSI_SCR_I2S_MODE_NORMAL	0x00000000
-#define CCSR_SSI_SCR_I2S_MODE_MASTER	0x00000020
-#define CCSR_SSI_SCR_I2S_MODE_SLAVE	0x00000040
-#define CCSR_SSI_SCR_SYN		0x00000010
-#define CCSR_SSI_SCR_NET		0x00000008
-#define CCSR_SSI_SCR_RE			0x00000004
-#define CCSR_SSI_SCR_TE			0x00000002
-#define CCSR_SSI_SCR_SSIEN		0x00000001
+/* -- SSI Register Map -- */
 
-#define CCSR_SSI_SISR_RFRC		0x01000000
-#define CCSR_SSI_SISR_TFRC		0x00800000
-#define CCSR_SSI_SISR_CMDAU		0x00040000
-#define CCSR_SSI_SISR_CMDDU		0x00020000
-#define CCSR_SSI_SISR_RXT		0x00010000
-#define CCSR_SSI_SISR_RDR1		0x00008000
-#define CCSR_SSI_SISR_RDR0		0x00004000
-#define CCSR_SSI_SISR_TDE1		0x00002000
-#define CCSR_SSI_SISR_TDE0		0x00001000
-#define CCSR_SSI_SISR_ROE1		0x00000800
-#define CCSR_SSI_SISR_ROE0		0x00000400
-#define CCSR_SSI_SISR_TUE1		0x00000200
-#define CCSR_SSI_SISR_TUE0		0x00000100
-#define CCSR_SSI_SISR_TFS		0x00000080
-#define CCSR_SSI_SISR_RFS		0x00000040
-#define CCSR_SSI_SISR_TLS		0x00000020
-#define CCSR_SSI_SISR_RLS		0x00000010
-#define CCSR_SSI_SISR_RFF1		0x00000008
-#define CCSR_SSI_SISR_RFF0		0x00000004
-#define CCSR_SSI_SISR_TFE1		0x00000002
-#define CCSR_SSI_SISR_TFE0		0x00000001
+/* SSI Transmit Data Register 0 */
+#define REG_SSI_STX0			0x00
+/* SSI Transmit Data Register 1 */
+#define REG_SSI_STX1			0x04
+/* SSI Receive Data Register 0 */
+#define REG_SSI_SRX0			0x08
+/* SSI Receive Data Register 1 */
+#define REG_SSI_SRX1			0x0c
+/* SSI Control Register */
+#define REG_SSI_SCR			0x10
+/* SSI Interrupt Status Register */
+#define REG_SSI_SISR			0x14
+/* SSI Interrupt Enable Register */
+#define REG_SSI_SIER			0x18
+/* SSI Transmit Configuration Register */
+#define REG_SSI_STCR			0x1c
+/* SSI Receive Configuration Register */
+#define REG_SSI_SRCR			0x20
+#define REG_SSI_SxCR(tx)		((tx) ? REG_SSI_STCR : REG_SSI_SRCR)
+/* SSI Transmit Clock Control Register */
+#define REG_SSI_STCCR			0x24
+/* SSI Receive Clock Control Register */
+#define REG_SSI_SRCCR			0x28
+#define REG_SSI_SxCCR(tx)		((tx) ? REG_SSI_STCCR : REG_SSI_SRCCR)
+/* SSI FIFO Control/Status Register */
+#define REG_SSI_SFCSR			0x2c
+/*
+ * SSI Test Register (Intended for debugging purposes only)
+ *
+ * Note: STR is not documented in recent IMX datasheets, but
+ * is described in the IMX51 reference manual, section 56.3.3.14
+ */
+#define REG_SSI_STR			0x30
+/*
+ * SSI Option Register (Intended for internal use only)
+ *
+ * Note: SOR is not documented in recent IMX datasheets, but
+ * is described in the IMX51 reference manual, section 56.3.3.15
+ */
+#define REG_SSI_SOR			0x34
+/* SSI AC97 Control Register */
+#define REG_SSI_SACNT			0x38
+/* SSI AC97 Command Address Register */
+#define REG_SSI_SACADD			0x3c
+/* SSI AC97 Command Data Register */
+#define REG_SSI_SACDAT			0x40
+/* SSI AC97 Tag Register */
+#define REG_SSI_SATAG			0x44
+/* SSI Transmit Time Slot Mask Register */
+#define REG_SSI_STMSK			0x48
+/* SSI Receive Time Slot Mask Register */
+#define REG_SSI_SRMSK			0x4c
+#define REG_SSI_SxMSK(tx)		((tx) ? REG_SSI_STMSK : REG_SSI_SRMSK)
+/*
+ * SSI AC97 Channel Status Register
+ *
+ * The status can be changed by:
+ * 1) Writing a '1' to a bit in SACCEN to set the relevant bit in SACCST
+ * 2) Writing a '1' to a bit in SACCDIS to clear the relevant bit
+ * 3) Receiving a '1' in the SLOTREQ bit from an external CODEC via AC Link
+ */
+#define REG_SSI_SACCST			0x50
+/* SSI AC97 Channel Enable Register -- Set bits in SACCST */
+#define REG_SSI_SACCEN			0x54
+/* SSI AC97 Channel Disable Register -- Clear bits in SACCST */
+#define REG_SSI_SACCDIS			0x58
+
+/* -- SSI Register Field Maps -- */
 
-#define CCSR_SSI_SIER_RFRC_EN		0x01000000
-#define CCSR_SSI_SIER_TFRC_EN		0x00800000
-#define CCSR_SSI_SIER_RDMAE		0x00400000
-#define CCSR_SSI_SIER_RIE		0x00200000
-#define CCSR_SSI_SIER_TDMAE		0x00100000
-#define CCSR_SSI_SIER_TIE		0x00080000
-#define CCSR_SSI_SIER_CMDAU_EN		0x00040000
-#define CCSR_SSI_SIER_CMDDU_EN		0x00020000
-#define CCSR_SSI_SIER_RXT_EN		0x00010000
-#define CCSR_SSI_SIER_RDR1_EN		0x00008000
-#define CCSR_SSI_SIER_RDR0_EN		0x00004000
-#define CCSR_SSI_SIER_TDE1_EN		0x00002000
-#define CCSR_SSI_SIER_TDE0_EN		0x00001000
-#define CCSR_SSI_SIER_ROE1_EN		0x00000800
-#define CCSR_SSI_SIER_ROE0_EN		0x00000400
-#define CCSR_SSI_SIER_TUE1_EN		0x00000200
-#define CCSR_SSI_SIER_TUE0_EN		0x00000100
-#define CCSR_SSI_SIER_TFS_EN		0x00000080
-#define CCSR_SSI_SIER_RFS_EN		0x00000040
-#define CCSR_SSI_SIER_TLS_EN		0x00000020
-#define CCSR_SSI_SIER_RLS_EN		0x00000010
-#define CCSR_SSI_SIER_RFF1_EN		0x00000008
-#define CCSR_SSI_SIER_RFF0_EN		0x00000004
-#define CCSR_SSI_SIER_TFE1_EN		0x00000002
-#define CCSR_SSI_SIER_TFE0_EN		0x00000001
+/* SSI Control Register -- REG_SSI_SCR 0x10 */
+#define SSI_SCR_SYNC_TX_FS		0x00001000
+#define SSI_SCR_RFR_CLK_DIS		0x00000800
+#define SSI_SCR_TFR_CLK_DIS		0x00000400
+#define SSI_SCR_TCH_EN			0x00000100
+#define SSI_SCR_SYS_CLK_EN		0x00000080
+#define SSI_SCR_I2S_MODE_MASK		0x00000060
+#define SSI_SCR_I2S_MODE_NORMAL		0x00000000
+#define SSI_SCR_I2S_MODE_MASTER		0x00000020
+#define SSI_SCR_I2S_MODE_SLAVE		0x00000040
+#define SSI_SCR_SYN			0x00000010
+#define SSI_SCR_NET			0x00000008
+#define SSI_SCR_I2S_NET_MASK		(SSI_SCR_NET | SSI_SCR_I2S_MODE_MASK)
+#define SSI_SCR_RE			0x00000004
+#define SSI_SCR_TE			0x00000002
+#define SSI_SCR_SSIEN			0x00000001
 
-#define CCSR_SSI_STCR_TXBIT0		0x00000200
-#define CCSR_SSI_STCR_TFEN1		0x00000100
-#define CCSR_SSI_STCR_TFEN0		0x00000080
-#define CCSR_SSI_STCR_TFDIR		0x00000040
-#define CCSR_SSI_STCR_TXDIR		0x00000020
-#define CCSR_SSI_STCR_TSHFD		0x00000010
-#define CCSR_SSI_STCR_TSCKP		0x00000008
-#define CCSR_SSI_STCR_TFSI		0x00000004
-#define CCSR_SSI_STCR_TFSL		0x00000002
-#define CCSR_SSI_STCR_TEFS		0x00000001
+/* SSI Interrupt Status Register -- REG_SSI_SISR 0x14 */
+#define SSI_SISR_RFRC			0x01000000
+#define SSI_SISR_TFRC			0x00800000
+#define SSI_SISR_CMDAU			0x00040000
+#define SSI_SISR_CMDDU			0x00020000
+#define SSI_SISR_RXT			0x00010000
+#define SSI_SISR_RDR1			0x00008000
+#define SSI_SISR_RDR0			0x00004000
+#define SSI_SISR_TDE1			0x00002000
+#define SSI_SISR_TDE0			0x00001000
+#define SSI_SISR_ROE1			0x00000800
+#define SSI_SISR_ROE0			0x00000400
+#define SSI_SISR_TUE1			0x00000200
+#define SSI_SISR_TUE0			0x00000100
+#define SSI_SISR_TFS			0x00000080
+#define SSI_SISR_RFS			0x00000040
+#define SSI_SISR_TLS			0x00000020
+#define SSI_SISR_RLS			0x00000010
+#define SSI_SISR_RFF1			0x00000008
+#define SSI_SISR_RFF0			0x00000004
+#define SSI_SISR_TFE1			0x00000002
+#define SSI_SISR_TFE0			0x00000001
 
-#define CCSR_SSI_SRCR_RXEXT		0x00000400
-#define CCSR_SSI_SRCR_RXBIT0		0x00000200
-#define CCSR_SSI_SRCR_RFEN1		0x00000100
-#define CCSR_SSI_SRCR_RFEN0		0x00000080
-#define CCSR_SSI_SRCR_RFDIR		0x00000040
-#define CCSR_SSI_SRCR_RXDIR		0x00000020
-#define CCSR_SSI_SRCR_RSHFD		0x00000010
-#define CCSR_SSI_SRCR_RSCKP		0x00000008
-#define CCSR_SSI_SRCR_RFSI		0x00000004
-#define CCSR_SSI_SRCR_RFSL		0x00000002
-#define CCSR_SSI_SRCR_REFS		0x00000001
+/* SSI Interrupt Enable Register -- REG_SSI_SIER 0x18 */
+#define SSI_SIER_RFRC_EN		0x01000000
+#define SSI_SIER_TFRC_EN		0x00800000
+#define SSI_SIER_RDMAE			0x00400000
+#define SSI_SIER_RIE			0x00200000
+#define SSI_SIER_TDMAE			0x00100000
+#define SSI_SIER_TIE			0x00080000
+#define SSI_SIER_CMDAU_EN		0x00040000
+#define SSI_SIER_CMDDU_EN		0x00020000
+#define SSI_SIER_RXT_EN			0x00010000
+#define SSI_SIER_RDR1_EN		0x00008000
+#define SSI_SIER_RDR0_EN		0x00004000
+#define SSI_SIER_TDE1_EN		0x00002000
+#define SSI_SIER_TDE0_EN		0x00001000
+#define SSI_SIER_ROE1_EN		0x00000800
+#define SSI_SIER_ROE0_EN		0x00000400
+#define SSI_SIER_TUE1_EN		0x00000200
+#define SSI_SIER_TUE0_EN		0x00000100
+#define SSI_SIER_TFS_EN			0x00000080
+#define SSI_SIER_RFS_EN			0x00000040
+#define SSI_SIER_TLS_EN			0x00000020
+#define SSI_SIER_RLS_EN			0x00000010
+#define SSI_SIER_RFF1_EN		0x00000008
+#define SSI_SIER_RFF0_EN		0x00000004
+#define SSI_SIER_TFE1_EN		0x00000002
+#define SSI_SIER_TFE0_EN		0x00000001
 
-/* STCCR and SRCCR */
-#define CCSR_SSI_SxCCR_DIV2_SHIFT	18
-#define CCSR_SSI_SxCCR_DIV2		0x00040000
-#define CCSR_SSI_SxCCR_PSR_SHIFT	17
-#define CCSR_SSI_SxCCR_PSR		0x00020000
-#define CCSR_SSI_SxCCR_WL_SHIFT		13
-#define CCSR_SSI_SxCCR_WL_MASK		0x0001E000
-#define CCSR_SSI_SxCCR_WL(x) \
-	(((((x) / 2) - 1) << CCSR_SSI_SxCCR_WL_SHIFT) & CCSR_SSI_SxCCR_WL_MASK)
-#define CCSR_SSI_SxCCR_DC_SHIFT		8
-#define CCSR_SSI_SxCCR_DC_MASK		0x00001F00
-#define CCSR_SSI_SxCCR_DC(x) \
-	((((x) - 1) << CCSR_SSI_SxCCR_DC_SHIFT) & CCSR_SSI_SxCCR_DC_MASK)
-#define CCSR_SSI_SxCCR_PM_SHIFT		0
-#define CCSR_SSI_SxCCR_PM_MASK		0x000000FF
-#define CCSR_SSI_SxCCR_PM(x) \
-	((((x) - 1) << CCSR_SSI_SxCCR_PM_SHIFT) & CCSR_SSI_SxCCR_PM_MASK)
+/* SSI Transmit Configuration Register -- REG_SSI_STCR 0x1C */
+#define SSI_STCR_TXBIT0			0x00000200
+#define SSI_STCR_TFEN1			0x00000100
+#define SSI_STCR_TFEN0			0x00000080
+#define SSI_STCR_TFDIR			0x00000040
+#define SSI_STCR_TXDIR			0x00000020
+#define SSI_STCR_TSHFD			0x00000010
+#define SSI_STCR_TSCKP			0x00000008
+#define SSI_STCR_TFSI			0x00000004
+#define SSI_STCR_TFSL			0x00000002
+#define SSI_STCR_TEFS			0x00000001
+
+/* SSI Receive Configuration Register -- REG_SSI_SRCR 0x20 */
+#define SSI_SRCR_RXEXT			0x00000400
+#define SSI_SRCR_RXBIT0			0x00000200
+#define SSI_SRCR_RFEN1			0x00000100
+#define SSI_SRCR_RFEN0			0x00000080
+#define SSI_SRCR_RFDIR			0x00000040
+#define SSI_SRCR_RXDIR			0x00000020
+#define SSI_SRCR_RSHFD			0x00000010
+#define SSI_SRCR_RSCKP			0x00000008
+#define SSI_SRCR_RFSI			0x00000004
+#define SSI_SRCR_RFSL			0x00000002
+#define SSI_SRCR_REFS			0x00000001
 
 /*
- * The xFCNT bits are read-only, and the xFWM bits are read/write.  Use the
- * CCSR_SSI_SFCSR_xFCNTy() macros to read the FIFO counters, and use the
- * CCSR_SSI_SFCSR_xFWMy() macros to set the watermarks.
+ * SSI Transmit Clock Control Register -- REG_SSI_STCCR 0x24
+ * SSI Receive Clock Control Register -- REG_SSI_SRCCR 0x28
+ */
+#define SSI_SxCCR_DIV2_SHIFT		18
+#define SSI_SxCCR_DIV2			0x00040000
+#define SSI_SxCCR_PSR_SHIFT		17
+#define SSI_SxCCR_PSR			0x00020000
+#define SSI_SxCCR_WL_SHIFT		13
+#define SSI_SxCCR_WL_MASK		0x0001E000
+#define SSI_SxCCR_WL(x) \
+	(((((x) / 2) - 1) << SSI_SxCCR_WL_SHIFT) & SSI_SxCCR_WL_MASK)
+#define SSI_SxCCR_DC_SHIFT		8
+#define SSI_SxCCR_DC_MASK		0x00001F00
+#define SSI_SxCCR_DC(x) \
+	((((x) - 1) << SSI_SxCCR_DC_SHIFT) & SSI_SxCCR_DC_MASK)
+#define SSI_SxCCR_PM_SHIFT		0
+#define SSI_SxCCR_PM_MASK		0x000000FF
+#define SSI_SxCCR_PM(x) \
+	((((x) - 1) << SSI_SxCCR_PM_SHIFT) & SSI_SxCCR_PM_MASK)
+
+/*
+ * SSI FIFO Control/Status Register -- REG_SSI_SFCSR 0x2c
+ *
+ * Tx or Rx FIFO Counter -- SSI_SFCSR_xFCNTy Read-Only
+ * Tx or Rx FIFO Watermarks -- SSI_SFCSR_xFWMy Read/Write
  */
-#define CCSR_SSI_SFCSR_RFCNT1_SHIFT	28
-#define CCSR_SSI_SFCSR_RFCNT1_MASK	0xF0000000
-#define CCSR_SSI_SFCSR_RFCNT1(x) \
-	(((x) & CCSR_SSI_SFCSR_RFCNT1_MASK) >> CCSR_SSI_SFCSR_RFCNT1_SHIFT)
-#define CCSR_SSI_SFCSR_TFCNT1_SHIFT	24
-#define CCSR_SSI_SFCSR_TFCNT1_MASK	0x0F000000
-#define CCSR_SSI_SFCSR_TFCNT1(x) \
-	(((x) & CCSR_SSI_SFCSR_TFCNT1_MASK) >> CCSR_SSI_SFCSR_TFCNT1_SHIFT)
-#define CCSR_SSI_SFCSR_RFWM1_SHIFT	20
-#define CCSR_SSI_SFCSR_RFWM1_MASK	0x00F00000
-#define CCSR_SSI_SFCSR_RFWM1(x)	\
-	(((x) << CCSR_SSI_SFCSR_RFWM1_SHIFT) & CCSR_SSI_SFCSR_RFWM1_MASK)
-#define CCSR_SSI_SFCSR_TFWM1_SHIFT	16
-#define CCSR_SSI_SFCSR_TFWM1_MASK	0x000F0000
-#define CCSR_SSI_SFCSR_TFWM1(x)	\
-	(((x) << CCSR_SSI_SFCSR_TFWM1_SHIFT) & CCSR_SSI_SFCSR_TFWM1_MASK)
-#define CCSR_SSI_SFCSR_RFCNT0_SHIFT	12
-#define CCSR_SSI_SFCSR_RFCNT0_MASK	0x0000F000
-#define CCSR_SSI_SFCSR_RFCNT0(x) \
-	(((x) & CCSR_SSI_SFCSR_RFCNT0_MASK) >> CCSR_SSI_SFCSR_RFCNT0_SHIFT)
-#define CCSR_SSI_SFCSR_TFCNT0_SHIFT	8
-#define CCSR_SSI_SFCSR_TFCNT0_MASK	0x00000F00
-#define CCSR_SSI_SFCSR_TFCNT0(x) \
-	(((x) & CCSR_SSI_SFCSR_TFCNT0_MASK) >> CCSR_SSI_SFCSR_TFCNT0_SHIFT)
-#define CCSR_SSI_SFCSR_RFWM0_SHIFT	4
-#define CCSR_SSI_SFCSR_RFWM0_MASK	0x000000F0
-#define CCSR_SSI_SFCSR_RFWM0(x)	\
-	(((x) << CCSR_SSI_SFCSR_RFWM0_SHIFT) & CCSR_SSI_SFCSR_RFWM0_MASK)
-#define CCSR_SSI_SFCSR_TFWM0_SHIFT	0
-#define CCSR_SSI_SFCSR_TFWM0_MASK	0x0000000F
-#define CCSR_SSI_SFCSR_TFWM0(x)	\
-	(((x) << CCSR_SSI_SFCSR_TFWM0_SHIFT) & CCSR_SSI_SFCSR_TFWM0_MASK)
+#define SSI_SFCSR_RFCNT1_SHIFT		28
+#define SSI_SFCSR_RFCNT1_MASK		0xF0000000
+#define SSI_SFCSR_RFCNT1(x) \
+	(((x) & SSI_SFCSR_RFCNT1_MASK) >> SSI_SFCSR_RFCNT1_SHIFT)
+#define SSI_SFCSR_TFCNT1_SHIFT		24
+#define SSI_SFCSR_TFCNT1_MASK		0x0F000000
+#define SSI_SFCSR_TFCNT1(x) \
+	(((x) & SSI_SFCSR_TFCNT1_MASK) >> SSI_SFCSR_TFCNT1_SHIFT)
+#define SSI_SFCSR_RFWM1_SHIFT		20
+#define SSI_SFCSR_RFWM1_MASK		0x00F00000
+#define SSI_SFCSR_RFWM1(x)	\
+	(((x) << SSI_SFCSR_RFWM1_SHIFT) & SSI_SFCSR_RFWM1_MASK)
+#define SSI_SFCSR_TFWM1_SHIFT		16
+#define SSI_SFCSR_TFWM1_MASK		0x000F0000
+#define SSI_SFCSR_TFWM1(x)	\
+	(((x) << SSI_SFCSR_TFWM1_SHIFT) & SSI_SFCSR_TFWM1_MASK)
+#define SSI_SFCSR_RFCNT0_SHIFT		12
+#define SSI_SFCSR_RFCNT0_MASK		0x0000F000
+#define SSI_SFCSR_RFCNT0(x) \
+	(((x) & SSI_SFCSR_RFCNT0_MASK) >> SSI_SFCSR_RFCNT0_SHIFT)
+#define SSI_SFCSR_TFCNT0_SHIFT		8
+#define SSI_SFCSR_TFCNT0_MASK		0x00000F00
+#define SSI_SFCSR_TFCNT0(x) \
+	(((x) & SSI_SFCSR_TFCNT0_MASK) >> SSI_SFCSR_TFCNT0_SHIFT)
+#define SSI_SFCSR_RFWM0_SHIFT		4
+#define SSI_SFCSR_RFWM0_MASK		0x000000F0
+#define SSI_SFCSR_RFWM0(x)	\
+	(((x) << SSI_SFCSR_RFWM0_SHIFT) & SSI_SFCSR_RFWM0_MASK)
+#define SSI_SFCSR_TFWM0_SHIFT		0
+#define SSI_SFCSR_TFWM0_MASK		0x0000000F
+#define SSI_SFCSR_TFWM0(x)	\
+	(((x) << SSI_SFCSR_TFWM0_SHIFT) & SSI_SFCSR_TFWM0_MASK)
 
-#define CCSR_SSI_STR_TEST		0x00008000
-#define CCSR_SSI_STR_RCK2TCK		0x00004000
-#define CCSR_SSI_STR_RFS2TFS		0x00002000
-#define CCSR_SSI_STR_RXSTATE(x) (((x) >> 8) & 0x1F)
-#define CCSR_SSI_STR_TXD2RXD		0x00000080
-#define CCSR_SSI_STR_TCK2RCK		0x00000040
-#define CCSR_SSI_STR_TFS2RFS		0x00000020
-#define CCSR_SSI_STR_TXSTATE(x) ((x) & 0x1F)
+/* SSI Test Register -- REG_SSI_STR 0x30 */
+#define SSI_STR_TEST			0x00008000
+#define SSI_STR_RCK2TCK			0x00004000
+#define SSI_STR_RFS2TFS			0x00002000
+#define SSI_STR_RXSTATE(x)		(((x) >> 8) & 0x1F)
+#define SSI_STR_TXD2RXD			0x00000080
+#define SSI_STR_TCK2RCK			0x00000040
+#define SSI_STR_TFS2RFS			0x00000020
+#define SSI_STR_TXSTATE(x)		((x) & 0x1F)
 
-#define CCSR_SSI_SOR_CLKOFF		0x00000040
-#define CCSR_SSI_SOR_RX_CLR		0x00000020
-#define CCSR_SSI_SOR_TX_CLR		0x00000010
-#define CCSR_SSI_SOR_INIT		0x00000008
-#define CCSR_SSI_SOR_WAIT_SHIFT		1
-#define CCSR_SSI_SOR_WAIT_MASK		0x00000006
-#define CCSR_SSI_SOR_WAIT(x) (((x) & 3) << CCSR_SSI_SOR_WAIT_SHIFT)
-#define CCSR_SSI_SOR_SYNRST 		0x00000001
+/* SSI Option Register -- REG_SSI_SOR 0x34 */
+#define SSI_SOR_CLKOFF			0x00000040
+#define SSI_SOR_RX_CLR			0x00000020
+#define SSI_SOR_TX_CLR			0x00000010
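+/* Clear the Tx or Rx FIFO depending on the stream direction */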
+#define SSI_SOR_xX_CLR(tx)		((tx) ? SSI_SOR_TX_CLR : SSI_SOR_RX_CLR)
+#define SSI_SOR_INIT			0x00000008
+#define SSI_SOR_WAIT_SHIFT		1
+#define SSI_SOR_WAIT_MASK		0x00000006
+#define SSI_SOR_WAIT(x)			(((x) & 3) << SSI_SOR_WAIT_SHIFT)
+#define SSI_SOR_SYNRST			0x00000001
 
-#define CCSR_SSI_SACNT_FRDIV(x)		(((x) & 0x3f) << 5)
-#define CCSR_SSI_SACNT_WR		0x00000010
-#define CCSR_SSI_SACNT_RD		0x00000008
-#define CCSR_SSI_SACNT_RDWR_MASK	0x00000018
-#define CCSR_SSI_SACNT_TIF		0x00000004
-#define CCSR_SSI_SACNT_FV		0x00000002
-#define CCSR_SSI_SACNT_AC97EN		0x00000001
+/* SSI AC97 Control Register -- REG_SSI_SACNT 0x38 */
+#define SSI_SACNT_FRDIV(x)		(((x) & 0x3f) << 5)
+#define SSI_SACNT_WR			0x00000010
+#define SSI_SACNT_RD			0x00000008
+#define SSI_SACNT_RDWR_MASK		0x00000018
+#define SSI_SACNT_TIF			0x00000004
+#define SSI_SACNT_FV			0x00000002
+#define SSI_SACNT_AC97EN		0x00000001
 
 
 struct device;
@@ -255,7 +318,7 @@ static inline void fsl_ssi_dbg_isr(struct fsl_ssi_dbg *stats, u32 sisr)
 }
 
 static inline int fsl_ssi_debugfs_create(struct fsl_ssi_dbg *ssi_dbg,
-		struct device *dev)
+					 struct device *dev)
 {
 	return 0;
 }
diff --git a/sound/soc/fsl/fsl_ssi_dbg.c b/sound/soc/fsl/fsl_ssi_dbg.c
index 5469ffbc02535c824340799d0ba1265ff0088d9e..7aac63e2c561d7372b4fa7dda30f4bddf0d62e16 100644
--- a/sound/soc/fsl/fsl_ssi_dbg.c
+++ b/sound/soc/fsl/fsl_ssi_dbg.c
@@ -18,86 +18,86 @@
 
 void fsl_ssi_dbg_isr(struct fsl_ssi_dbg *dbg, u32 sisr)
 {
-	if (sisr & CCSR_SSI_SISR_RFRC)
+	if (sisr & SSI_SISR_RFRC)
 		dbg->stats.rfrc++;
 
-	if (sisr & CCSR_SSI_SISR_TFRC)
+	if (sisr & SSI_SISR_TFRC)
 		dbg->stats.tfrc++;
 
-	if (sisr & CCSR_SSI_SISR_CMDAU)
+	if (sisr & SSI_SISR_CMDAU)
 		dbg->stats.cmdau++;
 
-	if (sisr & CCSR_SSI_SISR_CMDDU)
+	if (sisr & SSI_SISR_CMDDU)
 		dbg->stats.cmddu++;
 
-	if (sisr & CCSR_SSI_SISR_RXT)
+	if (sisr & SSI_SISR_RXT)
 		dbg->stats.rxt++;
 
-	if (sisr & CCSR_SSI_SISR_RDR1)
+	if (sisr & SSI_SISR_RDR1)
 		dbg->stats.rdr1++;
 
-	if (sisr & CCSR_SSI_SISR_RDR0)
+	if (sisr & SSI_SISR_RDR0)
 		dbg->stats.rdr0++;
 
-	if (sisr & CCSR_SSI_SISR_TDE1)
+	if (sisr & SSI_SISR_TDE1)
 		dbg->stats.tde1++;
 
-	if (sisr & CCSR_SSI_SISR_TDE0)
+	if (sisr & SSI_SISR_TDE0)
 		dbg->stats.tde0++;
 
-	if (sisr & CCSR_SSI_SISR_ROE1)
+	if (sisr & SSI_SISR_ROE1)
 		dbg->stats.roe1++;
 
-	if (sisr & CCSR_SSI_SISR_ROE0)
+	if (sisr & SSI_SISR_ROE0)
 		dbg->stats.roe0++;
 
-	if (sisr & CCSR_SSI_SISR_TUE1)
+	if (sisr & SSI_SISR_TUE1)
 		dbg->stats.tue1++;
 
-	if (sisr & CCSR_SSI_SISR_TUE0)
+	if (sisr & SSI_SISR_TUE0)
 		dbg->stats.tue0++;
 
-	if (sisr & CCSR_SSI_SISR_TFS)
+	if (sisr & SSI_SISR_TFS)
 		dbg->stats.tfs++;
 
-	if (sisr & CCSR_SSI_SISR_RFS)
+	if (sisr & SSI_SISR_RFS)
 		dbg->stats.rfs++;
 
-	if (sisr & CCSR_SSI_SISR_TLS)
+	if (sisr & SSI_SISR_TLS)
 		dbg->stats.tls++;
 
-	if (sisr & CCSR_SSI_SISR_RLS)
+	if (sisr & SSI_SISR_RLS)
 		dbg->stats.rls++;
 
-	if (sisr & CCSR_SSI_SISR_RFF1)
+	if (sisr & SSI_SISR_RFF1)
 		dbg->stats.rff1++;
 
-	if (sisr & CCSR_SSI_SISR_RFF0)
+	if (sisr & SSI_SISR_RFF0)
 		dbg->stats.rff0++;
 
-	if (sisr & CCSR_SSI_SISR_TFE1)
+	if (sisr & SSI_SISR_TFE1)
 		dbg->stats.tfe1++;
 
-	if (sisr & CCSR_SSI_SISR_TFE0)
+	if (sisr & SSI_SISR_TFE0)
 		dbg->stats.tfe0++;
 }
 
-/* Show the statistics of a flag only if its interrupt is enabled.  The
- * compiler will optimze this code to a no-op if the interrupt is not
- * enabled.
+/**
+ * Show the statistics of a flag only if its interrupt is enabled
+ *
+ * Compilers will optimize it to a no-op if the interrupt is disabled
  */
 #define SIER_SHOW(flag, name) \
 	do { \
-		if (CCSR_SSI_SIER_##flag) \
+		if (SSI_SIER_##flag) \
 			seq_printf(s, #name "=%u\n", ssi_dbg->stats.name); \
 	} while (0)
 
 
 /**
- * fsl_sysfs_ssi_show: display SSI statistics
+ * Display the statistics for the current SSI device
  *
- * Display the statistics for the current SSI device.  To avoid confusion,
- * we only show those counts that are enabled.
+ * To avoid confusion, only show those counts that are enabled
  */
 static int fsl_ssi_stats_show(struct seq_file *s, void *unused)
 {
@@ -147,7 +147,8 @@ int fsl_ssi_debugfs_create(struct fsl_ssi_dbg *ssi_dbg, struct device *dev)
 		return -ENOMEM;
 
 	ssi_dbg->dbg_stats = debugfs_create_file("stats", S_IRUGO,
-			ssi_dbg->dbg_dir, ssi_dbg, &fsl_ssi_stats_ops);
+						 ssi_dbg->dbg_dir, ssi_dbg,
+						 &fsl_ssi_stats_ops);
 	if (!ssi_dbg->dbg_stats) {
 		debugfs_remove(ssi_dbg->dbg_dir);
 		return -ENOMEM;
diff --git a/sound/soc/hisilicon/hi6210-i2s.c b/sound/soc/hisilicon/hi6210-i2s.c
index 0c8f86d4020e7af3fb3a1ce2e23104f58eb662b8..07a57209e05595970236701de4556bde12e443fe 100644
--- a/sound/soc/hisilicon/hi6210-i2s.c
+++ b/sound/soc/hisilicon/hi6210-i2s.c
@@ -36,7 +36,6 @@
 #include <linux/of_irq.h>
 #include <linux/mfd/syscon.h>
 #include <linux/reset-controller.h>
-#include <linux/clk.h>
 
 #include "hi6210-i2s.h"
 
diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c
index 32d6e02e21049719feb32691ee0ded359b5a4572..6cd481bec27524c6e8bd395071ba066fd97871e2 100644
--- a/sound/soc/intel/atom/sst/sst_acpi.c
+++ b/sound/soc/intel/atom/sst/sst_acpi.c
@@ -236,6 +236,9 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
 	/* Find the IRQ */
 	ctx->irq_num = platform_get_irq(pdev,
 				ctx->pdata->res_info->acpi_ipc_irq_index);
+	if (ctx->irq_num <= 0)
+		return ctx->irq_num < 0 ? ctx->irq_num : -EIO;
+
 	return 0;
 }
 
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index d955836c6870a8ad7d6cbe4939fbc3c457738c77..488ec48f296a9c9cdd99771274d0714112f1a720 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -38,6 +38,7 @@ enum {
 	BYT_RT5651_DMIC_MAP,
 	BYT_RT5651_IN1_MAP,
 	BYT_RT5651_IN2_MAP,
+	BYT_RT5651_IN1_IN2_MAP,
 };
 
 #define BYT_RT5651_MAP(quirk)	((quirk) & GENMASK(7, 0))
@@ -171,6 +172,13 @@ static const struct snd_soc_dapm_route byt_rt5651_intmic_in2_map[] = {
 	{"IN2P", NULL, "Internal Mic"},
 };
 
+static const struct snd_soc_dapm_route byt_rt5651_intmic_in1_in2_map[] = {
+	{"Internal Mic", NULL, "micbias1"},
+	{"IN1P", NULL, "Internal Mic"},
+	{"IN2P", NULL, "Internal Mic"},
+	{"IN3P", NULL, "Headset Mic"},
+};
+
 static const struct snd_kcontrol_new byt_rt5651_controls[] = {
 	SOC_DAPM_PIN_SWITCH("Headphone"),
 	SOC_DAPM_PIN_SWITCH("Headset Mic"),
@@ -256,7 +264,7 @@ static const struct dmi_system_id byt_rt5651_quirk_table[] = {
 			DMI_MATCH(DMI_SYS_VENDOR, "KIANO"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "KIANO SlimNote 14.2"),
 		},
-		.driver_data = (void *)(BYT_RT5651_IN2_MAP),
+		.driver_data = (void *)(BYT_RT5651_IN1_IN2_MAP),
 	},
 	{}
 };
@@ -281,6 +289,10 @@ static int byt_rt5651_init(struct snd_soc_pcm_runtime *runtime)
 		custom_map = byt_rt5651_intmic_in2_map;
 		num_routes = ARRAY_SIZE(byt_rt5651_intmic_in2_map);
 		break;
+	case BYT_RT5651_IN1_IN2_MAP:
+		custom_map = byt_rt5651_intmic_in1_in2_map;
+		num_routes = ARRAY_SIZE(byt_rt5651_intmic_in1_in2_map);
+		break;
 	default:
 		custom_map = byt_rt5651_intmic_dmic_map;
 		num_routes = ARRAY_SIZE(byt_rt5651_intmic_dmic_map);
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index 18d129caa974f7c905930d6381f7673820b0323f..f898ee140cdcc5698d8cbf92a7d82272a44f5311 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -118,6 +118,7 @@ static const struct snd_soc_dapm_widget cht_dapm_widgets[] = {
 	SND_SOC_DAPM_HP("Headphone", NULL),
 	SND_SOC_DAPM_MIC("Headset Mic", NULL),
 	SND_SOC_DAPM_MIC("Int Mic", NULL),
+	SND_SOC_DAPM_MIC("Int Analog Mic", NULL),
 	SND_SOC_DAPM_SPK("Ext Spk", NULL),
 	SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
 			platform_clock_control, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
@@ -128,6 +129,8 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
 	{"IN1N", NULL, "Headset Mic"},
 	{"DMIC L1", NULL, "Int Mic"},
 	{"DMIC R1", NULL, "Int Mic"},
+	{"IN2P", NULL, "Int Analog Mic"},
+	{"IN2N", NULL, "Int Analog Mic"},
 	{"Headphone", NULL, "HPOL"},
 	{"Headphone", NULL, "HPOR"},
 	{"Ext Spk", NULL, "SPOL"},
@@ -135,6 +138,9 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
 	{"Headphone", NULL, "Platform Clock"},
 	{"Headset Mic", NULL, "Platform Clock"},
 	{"Int Mic", NULL, "Platform Clock"},
+	{"Int Analog Mic", NULL, "Platform Clock"},
+	{"Int Analog Mic", NULL, "micbias1"},
+	{"Int Analog Mic", NULL, "micbias2"},
 	{"Ext Spk", NULL, "Platform Clock"},
 };
 
@@ -189,6 +195,7 @@ static const struct snd_kcontrol_new cht_mc_controls[] = {
 	SOC_DAPM_PIN_SWITCH("Headphone"),
 	SOC_DAPM_PIN_SWITCH("Headset Mic"),
 	SOC_DAPM_PIN_SWITCH("Int Mic"),
+	SOC_DAPM_PIN_SWITCH("Int Analog Mic"),
 	SOC_DAPM_PIN_SWITCH("Ext Spk"),
 };
 
diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c
index 5e1ea0371c9097bf1c79737bc1892d7667479480..3c51607792044fe5200842a6a23be2d4f175fe3c 100644
--- a/sound/soc/intel/boards/haswell.c
+++ b/sound/soc/intel/boards/haswell.c
@@ -76,7 +76,7 @@ static int haswell_rt5640_hw_params(struct snd_pcm_substream *substream,
 	}
 
 	/* set correct codec filter for DAI format and clock config */
-	snd_soc_update_bits(rtd->codec, 0x83, 0xffff, 0x8000);
+	snd_soc_component_update_bits(codec_dai->component, 0x83, 0xffff, 0x8000);
 
 	return ret;
 }
diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
index 6dcad0a8a0d045969873d983fa8b89bbe1f91e96..bf7014ca486f7fdc49f5807f581aceff58c997a2 100644
--- a/sound/soc/intel/boards/kbl_rt5663_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
@@ -225,7 +225,7 @@ static int kabylake_rt5663_codec_init(struct snd_soc_pcm_runtime *rtd)
 	}
 
 	jack = &ctx->kabylake_headset;
-	snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
+	snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
 	snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
 	snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
 	snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
index 271ae3c2c5354c5788aa9383772ebbe63b4e8d9a..90ea98f01c4cc03a91a96d6182773315191412da 100644
--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -195,7 +195,7 @@ static int kabylake_rt5663_codec_init(struct snd_soc_pcm_runtime *rtd)
 	}
 
 	jack = &ctx->kabylake_headset;
-	snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
+	snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
 	snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
 	snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
 	snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
diff --git a/sound/soc/intel/boards/mfld_machine.c b/sound/soc/intel/boards/mfld_machine.c
index 6f44acfb4aae7337aa1f4009e0a6c0b71efad2c3..7cb44fdde1ee80a78e78f25b9c0de09b18748f86 100644
--- a/sound/soc/intel/boards/mfld_machine.c
+++ b/sound/soc/intel/boards/mfld_machine.c
@@ -372,6 +372,8 @@ static int snd_mfld_mc_probe(struct platform_device *pdev)
 
 	/* retrive the irq number */
 	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0)
+		return irq < 0 ? irq : -ENODEV;
 
 	/* audio interrupt base of SRAM location where
 	 * interrupts are stored by System FW */
diff --git a/sound/soc/intel/common/sst-dsp.c b/sound/soc/intel/common/sst-dsp.c
index 11c0805393ff96c50b03b2b29fb70a32b53fa3dc..fd82f4b1d4a07bc2b289a27b9edf4a4610606587 100644
--- a/sound/soc/intel/common/sst-dsp.c
+++ b/sound/soc/intel/common/sst-dsp.c
@@ -269,7 +269,7 @@ int sst_dsp_register_poll(struct sst_dsp *ctx, u32 offset, u32 mask,
 	 */
 
 	timeout = jiffies + msecs_to_jiffies(time);
-	while (((sst_dsp_shim_read_unlocked(ctx, offset) & mask) != target)
+	while ((((reg = sst_dsp_shim_read_unlocked(ctx, offset)) & mask) != target)
 		&& time_before(jiffies, timeout)) {
 		k++;
 		if (k > 10)
@@ -278,8 +278,6 @@ int sst_dsp_register_poll(struct sst_dsp *ctx, u32 offset, u32 mask,
 		usleep_range(s, 2*s);
 	}
 
-	reg = sst_dsp_shim_read_unlocked(ctx, offset);
-
 	if ((reg & mask) == target) {
 		dev_dbg(ctx->dev, "FW Poll Status: reg=%#x %s successful\n",
 					reg, operation);
diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c
index 4524211960e4a4a5941f08a8a2066096e471cf6f..440bca7afbf142874bf150a580eea27b91ae016b 100644
--- a/sound/soc/intel/skylake/bxt-sst.c
+++ b/sound/soc/intel/skylake/bxt-sst.c
@@ -595,7 +595,7 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
 	INIT_DELAYED_WORK(&skl->d0i3.work, bxt_set_dsp_D0i3);
 	skl->d0i3.state = SKL_DSP_D0I3_NONE;
 
-	return 0;
+	return skl_dsp_acquire_irq(sst);
 }
 EXPORT_SYMBOL_GPL(bxt_sst_dsp_init);
 
diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c
index 387de388ce29405be5ad10875cb66a17adf39f55..245df1067ba8902e2151cc999fd5172220f2968c 100644
--- a/sound/soc/intel/skylake/cnl-sst.c
+++ b/sound/soc/intel/skylake/cnl-sst.c
@@ -458,7 +458,7 @@ int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
 	cnl->boot_complete = false;
 	init_waitqueue_head(&cnl->boot_wait);
 
-	return 0;
+	return skl_dsp_acquire_irq(sst);
 }
 EXPORT_SYMBOL_GPL(cnl_sst_dsp_init);
 
diff --git a/sound/soc/intel/skylake/skl-i2s.h b/sound/soc/intel/skylake/skl-i2s.h
new file mode 100644
index 0000000000000000000000000000000000000000..dcf819bc688f3d7f05e9594be8a7ee61ddd648d7
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-i2s.h
@@ -0,0 +1,64 @@
+/*
+ *  skl-i2s.h - i2s blob mapping
+ *
+ *  Copyright (C) 2017 Intel Corp
+ *  Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#ifndef __SOUND_SOC_SKL_I2S_H
+#define __SOUND_SOC_SKL_I2S_H
+
+#define SKL_I2S_MAX_TIME_SLOTS		8
+#define SKL_MCLK_DIV_CLK_SRC_MASK	GENMASK(17, 16)
+
+#define SKL_MNDSS_DIV_CLK_SRC_MASK	GENMASK(21, 20)
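+/* Shift amount needed to extract the field selected by a contiguous mask */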
+#define SKL_SHIFT(x)			(ffs(x) - 1)
+#define SKL_MCLK_DIV_RATIO_MASK		GENMASK(11, 0)
+
+struct skl_i2s_config {
+	u32 ssc0;
+	u32 ssc1;
+	u32 sscto;
+	u32 sspsp;
+	u32 sstsa;
+	u32 ssrsa;
+	u32 ssc2;
+	u32 sspsp2;
+	u32 ssc3;
+	u32 ssioc;
+} __packed;
+
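+/* MCLK divider control (mdivctrl) and divider ratio (mdivr) words of the I2S blob */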
+struct skl_i2s_config_mclk {
+	u32 mdivctrl;
+	u32 mdivr;
+};
+
+/**
+ * struct skl_i2s_config_blob_legacy - Structure defines I2S Gateway
+ * configuration legacy blob
+ *
+ * @gtw_attr:		Gateway attribute for the I2S Gateway
+ * @tdm_ts_group:	TDM slot mapping against channels in the Gateway.
+ * @i2s_cfg:		I2S HW registers
+ * @mclk:		MCLK clock source and divider values
+ */
+struct skl_i2s_config_blob_legacy {
+	u32 gtw_attr;
+	u32 tdm_ts_group[SKL_I2S_MAX_TIME_SLOTS];
+	struct skl_i2s_config i2s_cfg;
+	struct skl_i2s_config_mclk mclk;
+};
+
+#endif /* __SOUND_SOC_SKL_I2S_H */
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index 61b5bfa79d1325b5a42e69119ccb1e39c20c0b6c..8cbf080c38b3ef1acfcecb8f869bfbcee59eb732 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -55,6 +55,19 @@ static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
 	return 0;
 }
 
+#define SKL_ASTATE_PARAM_ID	4
+
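+/* Send the A-state table to the DSP firmware as a large config IPC message */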
+void skl_dsp_set_astate_cfg(struct skl_sst *ctx, u32 cnt, void *data)
+{
+	struct skl_ipc_large_config_msg	msg = {0};
+
+	msg.large_param_id = SKL_ASTATE_PARAM_ID;
+	msg.param_data_size = (cnt * sizeof(struct skl_astate_param) +
+				sizeof(cnt));
+
+	skl_ipc_set_large_config(&ctx->ipc, &msg, data);
+}
+
 #define NOTIFICATION_PARAM_ID 3
 #define NOTIFICATION_MASK 0xf
 
@@ -404,11 +417,20 @@ int skl_resume_dsp(struct skl *skl)
 	if (skl->skl_sst->is_first_boot == true)
 		return 0;
 
+	/* disable dynamic clock gating during fw and lib download */
+	ctx->enable_miscbdcge(ctx->dev, false);
+
 	ret = skl_dsp_wake(ctx->dsp);
+	ctx->enable_miscbdcge(ctx->dev, true);
 	if (ret < 0)
 		return ret;
 
 	skl_dsp_enable_notification(skl->skl_sst, false);
+
+	if (skl->cfg.astate_cfg != NULL) {
+		skl_dsp_set_astate_cfg(skl->skl_sst, skl->cfg.astate_cfg->count,
+					skl->cfg.astate_cfg);
+	}
 	return ret;
 }
 
diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
index 3eaac41090ca7f8b07ed99511d58cbc1f5498c56..3b1d2b828c1b6425c06614cce20293265c3c35ed 100644
--- a/sound/soc/intel/skylake/skl-nhlt.c
+++ b/sound/soc/intel/skylake/skl-nhlt.c
@@ -19,6 +19,7 @@
  */
 #include <linux/pci.h>
 #include "skl.h"
+#include "skl-i2s.h"
 
 #define NHLT_ACPI_HEADER_SIG	"NHLT"
 
@@ -43,7 +44,8 @@ struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
 	obj = acpi_evaluate_dsm(handle, &osc_guid, 1, 1, NULL);
 	if (obj && obj->type == ACPI_TYPE_BUFFER) {
 		nhlt_ptr = (struct nhlt_resource_desc  *)obj->buffer.pointer;
-		nhlt_table = (struct nhlt_acpi_table *)
+		if (nhlt_ptr->length)
+			nhlt_table = (struct nhlt_acpi_table *)
 				memremap(nhlt_ptr->min_addr, nhlt_ptr->length,
 				MEMREMAP_WB);
 		ACPI_FREE(obj);
@@ -276,3 +278,157 @@ void skl_nhlt_remove_sysfs(struct skl *skl)
 
 	sysfs_remove_file(&dev->kobj, &dev_attr_platform_id.attr);
 }
+
+/*
+ * Query NHLT for all the format configurations of a particular endpoint and
+ * store every supported rate in a rate table for the corresponding
+ * sclk/sclkfs.
+ */
+static void skl_get_ssp_clks(struct skl *skl, struct skl_ssp_clk *ssp_clks,
+				struct nhlt_fmt *fmt, u8 id)
+{
+	struct skl_i2s_config_blob_legacy *i2s_config;
+	struct skl_clk_parent_src *parent;
+	struct skl_ssp_clk *sclk, *sclkfs;
+	struct nhlt_fmt_cfg *fmt_cfg;
+	struct wav_fmt_ext *wav_fmt;
+	unsigned long rate = 0;
+	bool present = false;
+	int rate_index = 0;
+	u16 channels, bps;
+	u8 clk_src;
+	int i, j;
+	u32 fs;
+
+	sclk = &ssp_clks[SKL_SCLK_OFS];
+	sclkfs = &ssp_clks[SKL_SCLKFS_OFS];
+
+	if (fmt->fmt_count == 0)
+		return;
+
+	for (i = 0; i < fmt->fmt_count; i++) {
+		fmt_cfg = &fmt->fmt_config[i];
+		wav_fmt = &fmt_cfg->fmt_ext;
+
+		channels = wav_fmt->fmt.channels;
+		bps = wav_fmt->fmt.bits_per_sample;
+		fs = wav_fmt->fmt.samples_per_sec;
+
+		/*
+		 * In case of a TDM configuration on an SSP, there can
+		 * be more than one blob, with a different channel mask
+		 * per use case for a specific rate and bps. But the sclk
+		 * rate will be generated for the total number of channels
+		 * used by that endpoint.
+		 *
+		 * So for the given fs and bps, choose the blob that has
+		 * the superset of all channels for that endpoint and
+		 * derive the rate from it.
+		 */
+		for (j = i; j < fmt->fmt_count; j++) {
+			fmt_cfg = &fmt->fmt_config[j];
+			wav_fmt = &fmt_cfg->fmt_ext;
+			if ((fs == wav_fmt->fmt.samples_per_sec) &&
+			   (bps == wav_fmt->fmt.bits_per_sample))
+				channels = max_t(u16, channels,
+						wav_fmt->fmt.channels);
+		}
+
+		rate = channels * bps * fs;
+
+		/* check if the rate is added already to the given SSP's sclk */
+		for (j = 0; (j < SKL_MAX_CLK_RATES) &&
+			    (sclk[id].rate_cfg[j].rate != 0); j++) {
+			if (sclk[id].rate_cfg[j].rate == rate) {
+				present = true;
+				break;
+			}
+		}
+
+		/* Fill rate and parent for sclk/sclkfs */
+		if (!present) {
+			/* MCLK Divider Source Select */
+			i2s_config = (struct skl_i2s_config_blob_legacy *)
+						fmt->fmt_config[0].config.caps;
+			clk_src = ((i2s_config->mclk.mdivctrl)
+					& SKL_MNDSS_DIV_CLK_SRC_MASK) >>
+					SKL_SHIFT(SKL_MNDSS_DIV_CLK_SRC_MASK);
+
+			parent = skl_get_parent_clk(clk_src);
+
+			/*
+			 * Do not copy the config data if there is no parent
+			 * clock available for this clock source select
+			 */
+			if (!parent)
+				continue;
+
+			sclk[id].rate_cfg[rate_index].rate = rate;
+			sclk[id].rate_cfg[rate_index].config = fmt_cfg;
+			sclkfs[id].rate_cfg[rate_index].rate = rate;
+			sclkfs[id].rate_cfg[rate_index].config = fmt_cfg;
+			sclk[id].parent_name = parent->name;
+			sclkfs[id].parent_name = parent->name;
+
+			rate_index++;
+		}
+	}
+}
+
+static void skl_get_mclk(struct skl *skl, struct skl_ssp_clk *mclk,
+				struct nhlt_fmt *fmt, u8 id)
+{
+	struct skl_i2s_config_blob_legacy *i2s_config;
+	struct nhlt_specific_cfg *fmt_cfg;
+	struct skl_clk_parent_src *parent;
+	u32 clkdiv, div_ratio;
+	u8 clk_src;
+
+	fmt_cfg = &fmt->fmt_config[0].config;
+	i2s_config = (struct skl_i2s_config_blob_legacy *)fmt_cfg->caps;
+
+	/* MCLK Divider Source Select */
+	clk_src = ((i2s_config->mclk.mdivctrl) & SKL_MCLK_DIV_CLK_SRC_MASK) >>
+					SKL_SHIFT(SKL_MCLK_DIV_CLK_SRC_MASK);
+
+	clkdiv = i2s_config->mclk.mdivr & SKL_MCLK_DIV_RATIO_MASK;
+
+	/* bypass divider */
+	div_ratio = 1;
+
+	if (clkdiv != SKL_MCLK_DIV_RATIO_MASK)
+		/* Divider is 2 + clkdiv */
+		div_ratio = clkdiv + 2;
+
+	/* Calculate MCLK rate from source using div value */
+	parent = skl_get_parent_clk(clk_src);
+	if (!parent)
+		return;
+
+	mclk[id].rate_cfg[0].rate = parent->rate / div_ratio;
+	mclk[id].rate_cfg[0].config = &fmt->fmt_config[0];
+	mclk[id].parent_name = parent->name;
+}
+
+void skl_get_clks(struct skl *skl, struct skl_ssp_clk *ssp_clks)
+{
+	struct nhlt_acpi_table *nhlt = (struct nhlt_acpi_table *)skl->nhlt;
+	struct nhlt_endpoint *epnt;
+	struct nhlt_fmt *fmt;
+	int i;
+	u8 id;
+
+	epnt = (struct nhlt_endpoint *)nhlt->desc;
+	for (i = 0; i < nhlt->endpoint_count; i++) {
+		if (epnt->linktype == NHLT_LINK_SSP) {
+			id = epnt->virtual_bus_id;
+
+			fmt = (struct nhlt_fmt *)(epnt->config.caps
+					+ epnt->config.size);
+
+			skl_get_ssp_clks(skl, ssp_clks, fmt, id);
+			skl_get_mclk(skl, ssp_clks, fmt, id);
+		}
+		epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
+	}
+}
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 1dd97479e0c014bf1f814f16dd419268529454e0..e4682853382614d83d73bf27a8e5a7ee3810bbca 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -537,7 +537,7 @@ static int skl_link_hw_params(struct snd_pcm_substream *substream,
 
 	snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev);
 
-	link = snd_hdac_ext_bus_get_link(ebus, rtd->codec->component.name);
+	link = snd_hdac_ext_bus_get_link(ebus, codec_dai->component->name);
 	if (!link)
 		return -EINVAL;
 
@@ -620,7 +620,7 @@ static int skl_link_hw_free(struct snd_pcm_substream *substream,
 
 	link_dev->link_prepared = 0;
 
-	link = snd_hdac_ext_bus_get_link(ebus, rtd->codec->component.name);
+	link = snd_hdac_ext_bus_get_link(ebus, rtd->codec_dai->component->name);
 	if (!link)
 		return -EINVAL;
 
@@ -1343,7 +1343,11 @@ static int skl_platform_soc_probe(struct snd_soc_platform *platform)
 			return -EIO;
 		}
 
+		/* disable dynamic clock gating during fw and lib download */
+		skl->skl_sst->enable_miscbdcge(platform->dev, false);
+
 		ret = ops->init_fw(platform->dev, skl->skl_sst);
+		skl->skl_sst->enable_miscbdcge(platform->dev, true);
 		if (ret < 0) {
 			dev_err(platform->dev, "Failed to boot first fw: %d\n", ret);
 			return ret;
@@ -1351,6 +1355,12 @@ static int skl_platform_soc_probe(struct snd_soc_platform *platform)
 		skl_populate_modules(skl);
 		skl->skl_sst->update_d0i3c = skl_update_d0i3c;
 		skl_dsp_enable_notification(skl->skl_sst, false);
+
+		if (skl->cfg.astate_cfg != NULL) {
+			skl_dsp_set_astate_cfg(skl->skl_sst,
+					skl->cfg.astate_cfg->count,
+					skl->cfg.astate_cfg);
+		}
 	}
 	pm_runtime_mark_last_busy(platform->dev);
 	pm_runtime_put_autosuspend(platform->dev);
diff --git a/sound/soc/intel/skylake/skl-ssp-clk.h b/sound/soc/intel/skylake/skl-ssp-clk.h
new file mode 100644
index 0000000000000000000000000000000000000000..c9ea8400426033065888e8d88b37379af738ed17
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-ssp-clk.h
@@ -0,0 +1,79 @@
+/*
+ *  skl-ssp-clk.h - Skylake ssp clock information and ipc structure
+ *
+ *  Copyright (C) 2017 Intel Corp
+ *  Author: Jaikrishna Nemallapudi <jaikrishnax.nemallapudi@intel.com>
+ *  Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#ifndef SOUND_SOC_SKL_SSP_CLK_H
+#define SOUND_SOC_SKL_SSP_CLK_H
+
+#define SKL_MAX_SSP		6
+/* xtal/cardinal/pll, parents of the ssp clocks and mclk */
+#define SKL_MAX_CLK_SRC		3
+#define SKL_MAX_SSP_CLK_TYPES	3 /* mclk, sclk, sclkfs */
+
+#define SKL_MAX_CLK_CNT		(SKL_MAX_SSP * SKL_MAX_SSP_CLK_TYPES)
+
+/* Max number of configurations supported for each clock */
+#define SKL_MAX_CLK_RATES	10
+
+#define SKL_SCLK_OFS		SKL_MAX_SSP
+#define SKL_SCLKFS_OFS		(SKL_SCLK_OFS + SKL_MAX_SSP)
+
+enum skl_clk_type {
+	SKL_MCLK,
+	SKL_SCLK,
+	SKL_SCLK_FS,
+};
+
+enum skl_clk_src_type {
+	SKL_XTAL,
+	SKL_CARDINAL,
+	SKL_PLL,
+};
+
+struct skl_clk_parent_src {
+	u8 clk_id;
+	const char *name;
+	unsigned long rate;
+	const char *parent_name;
+};
+
+struct skl_clk_rate_cfg_table {
+	unsigned long rate;
+	void *config;
+};
+
+/*
+ * The mclk rate is stored in rate_cfg[0]. For sclk and sclkfs, rate_cfg[]
+ * holds all possible rates the SSP can generate on that platform.
+ */
+struct skl_ssp_clk {
+	const char *name;
+	const char *parent_name;
+	struct skl_clk_rate_cfg_table rate_cfg[SKL_MAX_CLK_RATES];
+};
+
+struct skl_clk_pdata {
+	struct skl_clk_parent_src *parent_clks;
+	int num_clks;
+	struct skl_ssp_clk *ssp_clks;
+	void *pvt_data;
+};
+
+#endif /* SOUND_SOC_SKL_SSP_CLK_H */
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c
index 19ee1d4f3bdff5b2ff5973d702c9ddf714de38ea..71e31ad0bb3fb0a11b0d906bfef6a7b3922ddae4 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.c
+++ b/sound/soc/intel/skylake/skl-sst-dsp.c
@@ -435,16 +435,22 @@ struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
 			return NULL;
 	}
 
+	return sst;
+}
+
+int skl_dsp_acquire_irq(struct sst_dsp *sst)
+{
+	struct sst_dsp_device *sst_dev = sst->sst_dev;
+	int ret;
+
 	/* Register the ISR */
 	ret = request_threaded_irq(sst->irq, sst->ops->irq_handler,
 		sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
-	if (ret) {
+	if (ret)
 		dev_err(sst->dev, "unable to grab threaded IRQ %d, disabling device\n",
 			       sst->irq);
-		return NULL;
-	}
 
-	return sst;
+	return ret;
 }
 
 void skl_dsp_free(struct sst_dsp *dsp)
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h
index eba20d37ba8c077d483fce9b673831f3ff832f30..12fc9a73dc8a6d782725ea4ed16be359aa9a95c1 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.h
+++ b/sound/soc/intel/skylake/skl-sst-dsp.h
@@ -206,6 +206,7 @@ int skl_cldma_wait_interruptible(struct sst_dsp *ctx);
 void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state);
 struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
 		struct sst_dsp_device *sst_dev, int irq);
+int skl_dsp_acquire_irq(struct sst_dsp *sst);
 bool is_skl_dsp_running(struct sst_dsp *ctx);
 
 unsigned int skl_dsp_get_enabled_cores(struct sst_dsp *ctx);
@@ -251,6 +252,9 @@ void skl_freeup_uuid_list(struct skl_sst *ctx);
 
 int skl_dsp_strip_extended_manifest(struct firmware *fw);
 void skl_dsp_enable_notification(struct skl_sst *ctx, bool enable);
+
+void skl_dsp_set_astate_cfg(struct skl_sst *ctx, u32 cnt, void *data);
+
 int skl_sst_ctx_init(struct device *dev, int irq, const char *fw_name,
 		struct skl_dsp_loader_ops dsp_ops, struct skl_sst **dsp,
 		struct sst_dsp_device *skl_dev);
diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
index 8ff89280d9fd44f11e34121f1dddd7647004c688..2ae405617876281749525db63dad5fffd0e8305d 100644
--- a/sound/soc/intel/skylake/skl-sst-utils.c
+++ b/sound/soc/intel/skylake/skl-sst-utils.c
@@ -178,7 +178,8 @@ static inline int skl_pvtid_128(struct uuid_module *module)
  * skl_get_pvt_id: generate a private id for use as module id
  *
  * @ctx: driver context
- * @mconfig: module configuration data
+ * @uuid_mod: module's uuid
+ * @instance_id: module's instance id
  *
  * This generates a 128 bit private unique id for a module TYPE so that
  * module instance is unique
@@ -208,7 +209,8 @@ EXPORT_SYMBOL_GPL(skl_get_pvt_id);
  * skl_put_pvt_id: free up the private id allocated
  *
  * @ctx: driver context
- * @mconfig: module configuration data
+ * @uuid_mod: module's uuid
+ * @pvt_id: module pvt id
  *
  * This frees a 128 bit private unique id previously generated
  */
diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c
index a436abf2fe3f38b65e93f9637de0f804b859a2aa..5a7e41b65ef3d8aa3302d6c2a449ef58f26edc72 100644
--- a/sound/soc/intel/skylake/skl-sst.c
+++ b/sound/soc/intel/skylake/skl-sst.c
@@ -569,7 +569,7 @@ int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
 
 	sst->fw_ops = skl_fw_ops;
 
-	return 0;
+	return skl_dsp_acquire_irq(sst);
 }
 EXPORT_SYMBOL_GPL(skl_sst_dsp_init);
 
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 81923da18ac2259ad159b8c0282242cc7c94c53c..28bc16a8e09a6657ed4799298a3c3b203987f696 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -3056,11 +3056,13 @@ static int skl_tplg_get_int_tkn(struct device *dev,
 		struct snd_soc_tplg_vendor_value_elem *tkn_elem,
 		struct skl *skl)
 {
-	int tkn_count = 0, ret;
+	int tkn_count = 0, ret, size;
 	static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx;
 	struct skl_module_res *res = NULL;
 	struct skl_module_iface *fmt = NULL;
 	struct skl_module *mod = NULL;
+	static struct skl_astate_param *astate_table;
+	static int astate_cfg_idx, count;
 	int i;
 
 	if (skl->modules) {
@@ -3093,6 +3095,46 @@ static int skl_tplg_get_int_tkn(struct device *dev,
 		mod_idx = tkn_elem->value;
 		break;
 
+	case SKL_TKN_U32_ASTATE_COUNT:
+		if (astate_table != NULL) {
+			dev_err(dev, "More than one entry for A-State count");
+			return -EINVAL;
+		}
+
+		if (tkn_elem->value > SKL_MAX_ASTATE_CFG) {
+			dev_err(dev, "Invalid A-State count %d\n",
+				tkn_elem->value);
+			return -EINVAL;
+		}
+
+		size = tkn_elem->value * sizeof(struct skl_astate_param) +
+				sizeof(count);
+		skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL);
+		if (!skl->cfg.astate_cfg)
+			return -ENOMEM;
+
+		astate_table = skl->cfg.astate_cfg->astate_table;
+		count = skl->cfg.astate_cfg->count = tkn_elem->value;
+		break;
+
+	case SKL_TKN_U32_ASTATE_IDX:
+		if (tkn_elem->value >= count) {
+			dev_err(dev, "Invalid A-State index %d\n",
+				tkn_elem->value);
+			return -EINVAL;
+		}
+
+		astate_cfg_idx = tkn_elem->value;
+		break;
+
+	case SKL_TKN_U32_ASTATE_KCPS:
+		astate_table[astate_cfg_idx].kcps = tkn_elem->value;
+		break;
+
+	case SKL_TKN_U32_ASTATE_CLK_SRC:
+		astate_table[astate_cfg_idx].clk_src = tkn_elem->value;
+		break;
+
 	case SKL_TKN_U8_IN_PIN_TYPE:
 	case SKL_TKN_U8_OUT_PIN_TYPE:
 	case SKL_TKN_U8_IN_QUEUE_COUNT:
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 31d8634e8aa1c4f71e7f089bd1da0545e1345ab0..32ce64c6b2dced88c562bea3aef2dfdb9d1fa687 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -355,6 +355,7 @@ static int skl_resume(struct device *dev)
 
 		if (ebus->cmd_dma_state)
 			snd_hdac_bus_init_cmd_io(&ebus->bus);
+		ret = 0;
 	} else {
 		ret = _skl_resume(ebus);
 
@@ -435,19 +436,51 @@ static int skl_free(struct hdac_ext_bus *ebus)
 	return 0;
 }
 
-static int skl_machine_device_register(struct skl *skl, void *driver_data)
+/*
+ * Each SSP has three clocks (mclk/sclk/sclkfs); e.g. ssp0's clocks are
+ * named "ssp0_mclk", "ssp0_sclk" and "ssp0_sclkfs".
+ * Since skl+ platforms have six SSPs, 18 clocks are created in total.
+ */
+static struct skl_ssp_clk skl_ssp_clks[] = {
+	{.name = "ssp0_mclk"}, {.name = "ssp1_mclk"}, {.name = "ssp2_mclk"},
+	{.name = "ssp3_mclk"}, {.name = "ssp4_mclk"}, {.name = "ssp5_mclk"},
+	{.name = "ssp0_sclk"}, {.name = "ssp1_sclk"}, {.name = "ssp2_sclk"},
+	{.name = "ssp3_sclk"}, {.name = "ssp4_sclk"}, {.name = "ssp5_sclk"},
+	{.name = "ssp0_sclkfs"}, {.name = "ssp1_sclkfs"},
+						{.name = "ssp2_sclkfs"},
+	{.name = "ssp3_sclkfs"}, {.name = "ssp4_sclkfs"},
+						{.name = "ssp5_sclkfs"},
+};
+
+static int skl_find_machine(struct skl *skl, void *driver_data)
 {
-	struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
-	struct platform_device *pdev;
 	struct snd_soc_acpi_mach *mach = driver_data;
-	int ret;
+	struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
+	struct skl_machine_pdata *pdata;
 
 	mach = snd_soc_acpi_find_machine(mach);
 	if (mach == NULL) {
 		dev_err(bus->dev, "No matching machine driver found\n");
 		return -ENODEV;
 	}
+
+	skl->mach = mach;
 	skl->fw_name = mach->fw_filename;
+	pdata = skl->mach->pdata;
+
+	if (mach->pdata)
+		skl->use_tplg_pcm = pdata->use_tplg_pcm;
+
+	return 0;
+}
+
+static int skl_machine_device_register(struct skl *skl)
+{
+	struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
+	struct snd_soc_acpi_mach *mach = skl->mach;
+	struct platform_device *pdev;
+	int ret;
 
 	pdev = platform_device_alloc(mach->drv_name, -1);
 	if (pdev == NULL) {
@@ -462,11 +495,8 @@ static int skl_machine_device_register(struct skl *skl, void *driver_data)
 		return -EIO;
 	}
 
-	if (mach->pdata) {
-		skl->use_tplg_pcm =
-			((struct skl_machine_pdata *)mach->pdata)->use_tplg_pcm;
+	if (mach->pdata)
 		dev_set_drvdata(&pdev->dev, mach->pdata);
-	}
 
 	skl->i2s_dev = pdev;
 
@@ -509,6 +539,74 @@ static void skl_dmic_device_unregister(struct skl *skl)
 		platform_device_unregister(skl->dmic_dev);
 }
 
+static struct skl_clk_parent_src skl_clk_src[] = {
+	{ .clk_id = SKL_XTAL, .name = "xtal" },
+	{ .clk_id = SKL_CARDINAL, .name = "cardinal", .rate = 24576000 },
+	{ .clk_id = SKL_PLL, .name = "pll", .rate = 96000000 },
+};
+
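+/* Return the parent clock source entry matching clk_id, or NULL if none is defined */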
+struct skl_clk_parent_src *skl_get_parent_clk(u8 clk_id)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(skl_clk_src); i++) {
+		if (skl_clk_src[i].clk_id == clk_id)
+			return &skl_clk_src[i];
+	}
+
+	return NULL;
+}
+
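+/* PCI IDs 0x9d70/0x9d71 use a 24 MHz xtal; all other platforms default to 19.2 MHz */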
+static void init_skl_xtal_rate(int pci_id)
+{
+	switch (pci_id) {
+	case 0x9d70:
+	case 0x9d71:
+		skl_clk_src[0].rate = 24000000;
+		return;
+
+	default:
+		skl_clk_src[0].rate = 19200000;
+		return;
+	}
+}
+
+static int skl_clock_device_register(struct skl *skl)
+{
+	struct platform_device_info pdevinfo = {NULL};
+	struct skl_clk_pdata *clk_pdata;
+
+	clk_pdata = devm_kzalloc(&skl->pci->dev, sizeof(*clk_pdata),
+							GFP_KERNEL);
+	if (!clk_pdata)
+		return -ENOMEM;
+
+	init_skl_xtal_rate(skl->pci->device);
+
+	clk_pdata->parent_clks = skl_clk_src;
+	clk_pdata->ssp_clks = skl_ssp_clks;
+	clk_pdata->num_clks = ARRAY_SIZE(skl_ssp_clks);
+
+	/* Query NHLT to fill the rates and parent */
+	skl_get_clks(skl, clk_pdata->ssp_clks);
+	clk_pdata->pvt_data = skl;
+
+	/* Register Platform device */
+	pdevinfo.parent = &skl->pci->dev;
+	pdevinfo.id = -1;
+	pdevinfo.name = "skl-ssp-clk";
+	pdevinfo.data = clk_pdata;
+	pdevinfo.size_data = sizeof(*clk_pdata);
+	skl->clk_dev = platform_device_register_full(&pdevinfo);
+	return PTR_ERR_OR_ZERO(skl->clk_dev);
+}
+
+static void skl_clock_device_unregister(struct skl *skl)
+{
+	if (skl->clk_dev)
+		platform_device_unregister(skl->clk_dev);
+}
+
 /*
  * Probe the given codec address
  */
@@ -615,18 +713,30 @@ static void skl_probe_work(struct work_struct *work)
 	/* create codec instances */
 	skl_codec_create(ebus);
 
+	/* register platform dai and controls */
+	err = skl_platform_register(bus->dev);
+	if (err < 0) {
+		dev_err(bus->dev, "platform register failed: %d\n", err);
+		return;
+	}
+
+	if (bus->ppcap) {
+		err = skl_machine_device_register(skl);
+		if (err < 0) {
+			dev_err(bus->dev, "machine register failed: %d\n", err);
+			goto out_err;
+		}
+	}
+
 	if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
 		err = snd_hdac_display_power(bus, false);
 		if (err < 0) {
 			dev_err(bus->dev, "Cannot turn off display power on i915\n");
+			skl_machine_device_unregister(skl);
 			return;
 		}
 	}
 
-	/* register platform dai and controls */
-	err = skl_platform_register(bus->dev);
-	if (err < 0)
-		return;
 	/*
 	 * we are done probing so decrement link counts
 	 */
@@ -791,18 +901,21 @@ static int skl_probe(struct pci_dev *pci,
 
 	/* check if dsp is there */
 	if (bus->ppcap) {
-		err = skl_machine_device_register(skl,
-				  (void *)pci_id->driver_data);
+		/* create device for dsp clk */
+		err = skl_clock_device_register(skl);
+		if (err < 0)
+			goto out_clk_free;
+
+		err = skl_find_machine(skl, (void *)pci_id->driver_data);
 		if (err < 0)
 			goto out_nhlt_free;
 
 		err = skl_init_dsp(skl);
 		if (err < 0) {
 			dev_dbg(bus->dev, "error failed to register dsp\n");
-			goto out_mach_free;
+			goto out_nhlt_free;
 		}
 		skl->skl_sst->enable_miscbdcge = skl_enable_miscbdcge;
-
 	}
 	if (bus->mlcap)
 		snd_hdac_ext_bus_get_ml_capabilities(ebus);
@@ -820,8 +933,8 @@ static int skl_probe(struct pci_dev *pci,
 
 out_dsp_free:
 	skl_free_dsp(skl);
-out_mach_free:
-	skl_machine_device_unregister(skl);
+out_clk_free:
+	skl_clock_device_unregister(skl);
 out_nhlt_free:
 	skl_nhlt_free(skl->nhlt);
 out_free:
@@ -872,6 +985,7 @@ static void skl_remove(struct pci_dev *pci)
 	skl_free_dsp(skl);
 	skl_machine_device_unregister(skl);
 	skl_dmic_device_unregister(skl);
+	skl_clock_device_unregister(skl);
 	skl_nhlt_remove_sysfs(skl);
 	skl_nhlt_free(skl->nhlt);
 	skl_free(ebus);
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index e00cde8200ddd186cea76aeec8442891ad4107a9..f411579bc7138ac1fce813cf8914720862ede50d 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -25,9 +25,12 @@
 #include <sound/hdaudio_ext.h>
 #include <sound/soc.h>
 #include "skl-nhlt.h"
+#include "skl-ssp-clk.h"
 
 #define SKL_SUSPEND_DELAY 2000
 
+#define SKL_MAX_ASTATE_CFG		3
+
 #define AZX_PCIREG_PGCTL		0x44
 #define AZX_PGCTL_LSRMD_MASK		(1 << 4)
 #define AZX_PCIREG_CGCTL		0x48
@@ -45,6 +48,20 @@ struct skl_dsp_resource {
 
 struct skl_debug;
 
+struct skl_astate_param {
+	u32 kcps;
+	u32 clk_src;
+};
+
+struct skl_astate_config {
+	u32 count;
+	struct skl_astate_param astate_table[0];
+};
+
+struct skl_fw_config {
+	struct skl_astate_config *astate_cfg;
+};
+
 struct skl {
 	struct hdac_ext_bus ebus;
 	struct pci_dev *pci;
@@ -52,6 +69,7 @@ struct skl {
 	unsigned int init_done:1; /* delayed init status */
 	struct platform_device *dmic_dev;
 	struct platform_device *i2s_dev;
+	struct platform_device *clk_dev;
 	struct snd_soc_platform *platform;
 	struct snd_soc_dai_driver *dais;
 
@@ -75,6 +93,8 @@ struct skl {
 	u8 nr_modules;
 	struct skl_module **modules;
 	bool use_tplg_pcm;
+	struct skl_fw_config cfg;
+	struct snd_soc_acpi_mach *mach;
 };
 
 #define skl_to_ebus(s)	(&(s)->ebus)
@@ -125,6 +145,8 @@ const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id);
 void skl_update_d0i3c(struct device *dev, bool enable);
 int skl_nhlt_create_sysfs(struct skl *skl);
 void skl_nhlt_remove_sysfs(struct skl *skl);
+void skl_get_clks(struct skl *skl, struct skl_ssp_clk *ssp_clks);
+struct skl_clk_parent_src *skl_get_parent_clk(u8 clk_id);
 
 struct skl_module_cfg;
 
diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c b/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c
index affa7fb25dd9080b853672d9669d0820205443cf..949fc3a1d02578d4e718cb80c87af4263de72d9f 100644
--- a/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c
+++ b/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c
@@ -14,451 +14,285 @@
  * GNU General Public License for more details.
  */
 
-#include <sound/soc.h>
-#include <linux/regmap.h>
-#include <linux/pm_runtime.h>
-
 #include "mt2701-afe-common.h"
 #include "mt2701-afe-clock-ctrl.h"
 
-static const char *aud_clks[MT2701_CLOCK_NUM] = {
-	[MT2701_AUD_INFRA_SYS_AUDIO] = "infra_sys_audio_clk",
-	[MT2701_AUD_AUD_MUX1_SEL] = "top_audio_mux1_sel",
-	[MT2701_AUD_AUD_MUX2_SEL] = "top_audio_mux2_sel",
-	[MT2701_AUD_AUD_MUX1_DIV] = "top_audio_mux1_div",
-	[MT2701_AUD_AUD_MUX2_DIV] = "top_audio_mux2_div",
-	[MT2701_AUD_AUD_48K_TIMING] = "top_audio_48k_timing",
-	[MT2701_AUD_AUD_44K_TIMING] = "top_audio_44k_timing",
-	[MT2701_AUD_AUDPLL_MUX_SEL] = "top_audpll_mux_sel",
-	[MT2701_AUD_APLL_SEL] = "top_apll_sel",
-	[MT2701_AUD_AUD1PLL_98M] = "top_aud1_pll_98M",
-	[MT2701_AUD_AUD2PLL_90M] = "top_aud2_pll_90M",
-	[MT2701_AUD_HADDS2PLL_98M] = "top_hadds2_pll_98M",
-	[MT2701_AUD_HADDS2PLL_294M] = "top_hadds2_pll_294M",
-	[MT2701_AUD_AUDPLL] = "top_audpll",
-	[MT2701_AUD_AUDPLL_D4] = "top_audpll_d4",
-	[MT2701_AUD_AUDPLL_D8] = "top_audpll_d8",
-	[MT2701_AUD_AUDPLL_D16] = "top_audpll_d16",
-	[MT2701_AUD_AUDPLL_D24] = "top_audpll_d24",
-	[MT2701_AUD_AUDINTBUS] = "top_audintbus_sel",
-	[MT2701_AUD_CLK_26M] = "clk_26m",
-	[MT2701_AUD_SYSPLL1_D4] = "top_syspll1_d4",
-	[MT2701_AUD_AUD_K1_SRC_SEL] = "top_aud_k1_src_sel",
-	[MT2701_AUD_AUD_K2_SRC_SEL] = "top_aud_k2_src_sel",
-	[MT2701_AUD_AUD_K3_SRC_SEL] = "top_aud_k3_src_sel",
-	[MT2701_AUD_AUD_K4_SRC_SEL] = "top_aud_k4_src_sel",
-	[MT2701_AUD_AUD_K5_SRC_SEL] = "top_aud_k5_src_sel",
-	[MT2701_AUD_AUD_K6_SRC_SEL] = "top_aud_k6_src_sel",
-	[MT2701_AUD_AUD_K1_SRC_DIV] = "top_aud_k1_src_div",
-	[MT2701_AUD_AUD_K2_SRC_DIV] = "top_aud_k2_src_div",
-	[MT2701_AUD_AUD_K3_SRC_DIV] = "top_aud_k3_src_div",
-	[MT2701_AUD_AUD_K4_SRC_DIV] = "top_aud_k4_src_div",
-	[MT2701_AUD_AUD_K5_SRC_DIV] = "top_aud_k5_src_div",
-	[MT2701_AUD_AUD_K6_SRC_DIV] = "top_aud_k6_src_div",
-	[MT2701_AUD_AUD_I2S1_MCLK] = "top_aud_i2s1_mclk",
-	[MT2701_AUD_AUD_I2S2_MCLK] = "top_aud_i2s2_mclk",
-	[MT2701_AUD_AUD_I2S3_MCLK] = "top_aud_i2s3_mclk",
-	[MT2701_AUD_AUD_I2S4_MCLK] = "top_aud_i2s4_mclk",
-	[MT2701_AUD_AUD_I2S5_MCLK] = "top_aud_i2s5_mclk",
-	[MT2701_AUD_AUD_I2S6_MCLK] = "top_aud_i2s6_mclk",
-	[MT2701_AUD_ASM_M_SEL] = "top_asm_m_sel",
-	[MT2701_AUD_ASM_H_SEL] = "top_asm_h_sel",
-	[MT2701_AUD_UNIVPLL2_D4] = "top_univpll2_d4",
-	[MT2701_AUD_UNIVPLL2_D2] = "top_univpll2_d2",
-	[MT2701_AUD_SYSPLL_D5] = "top_syspll_d5",
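+/* Base clocks required by the AFE, indexed by MT2701_* clock ID */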
+static const char *const base_clks[] = {
+	[MT2701_INFRA_SYS_AUDIO] = "infra_sys_audio_clk",
+	[MT2701_TOP_AUD_MCLK_SRC0] = "top_audio_mux1_sel",
+	[MT2701_TOP_AUD_MCLK_SRC1] = "top_audio_mux2_sel",
+	[MT2701_TOP_AUD_A1SYS] = "top_audio_a1sys_hp",
+	[MT2701_TOP_AUD_A2SYS] = "top_audio_a2sys_hp",
+	[MT2701_AUDSYS_AFE] = "audio_afe_pd",
+	[MT2701_AUDSYS_AFE_CONN] = "audio_afe_conn_pd",
+	[MT2701_AUDSYS_A1SYS] = "audio_a1sys_pd",
+	[MT2701_AUDSYS_A2SYS] = "audio_a2sys_pd",
 };
 
 int mt2701_init_clock(struct mtk_base_afe *afe)
 {
 	struct mt2701_afe_private *afe_priv = afe->platform_priv;
-	int i = 0;
-
-	for (i = 0; i < MT2701_CLOCK_NUM; i++) {
-		afe_priv->clocks[i] = devm_clk_get(afe->dev, aud_clks[i]);
-		if (IS_ERR(afe_priv->clocks[i])) {
-			dev_warn(afe->dev, "%s devm_clk_get %s fail\n",
-				 __func__, aud_clks[i]);
-			return PTR_ERR(aud_clks[i]);
+	int i;
+
+	for (i = 0; i < MT2701_BASE_CLK_NUM; i++) {
+		afe_priv->base_ck[i] = devm_clk_get(afe->dev, base_clks[i]);
+		if (IS_ERR(afe_priv->base_ck[i])) {
+			dev_err(afe->dev, "failed to get %s\n", base_clks[i]);
+			return PTR_ERR(afe_priv->base_ck[i]);
+		}
+	}
+
+	/* Get I2S related clocks */
+	for (i = 0; i < MT2701_I2S_NUM; i++) {
+		struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[i];
+		char name[13];
+
+		snprintf(name, sizeof(name), "i2s%d_src_sel", i);
+		i2s_path->sel_ck = devm_clk_get(afe->dev, name);
+		if (IS_ERR(i2s_path->sel_ck)) {
+			dev_err(afe->dev, "failed to get %s\n", name);
+			return PTR_ERR(i2s_path->sel_ck);
+		}
+
+		snprintf(name, sizeof(name), "i2s%d_src_div", i);
+		i2s_path->div_ck = devm_clk_get(afe->dev, name);
+		if (IS_ERR(i2s_path->div_ck)) {
+			dev_err(afe->dev, "failed to get %s\n", name);
+			return PTR_ERR(i2s_path->div_ck);
+		}
+
+		snprintf(name, sizeof(name), "i2s%d_mclk_en", i);
+		i2s_path->mclk_ck = devm_clk_get(afe->dev, name);
+		if (IS_ERR(i2s_path->mclk_ck)) {
+			dev_err(afe->dev, "failed to get %s\n", name);
+			return PTR_ERR(i2s_path->mclk_ck);
+		}
+
+		snprintf(name, sizeof(name), "i2so%d_hop_ck", i);
+		i2s_path->hop_ck[I2S_OUT] = devm_clk_get(afe->dev, name);
+		if (IS_ERR(i2s_path->hop_ck[I2S_OUT])) {
+			dev_err(afe->dev, "failed to get %s\n", name);
+			return PTR_ERR(i2s_path->hop_ck[I2S_OUT]);
+		}
+
+		snprintf(name, sizeof(name), "i2si%d_hop_ck", i);
+		i2s_path->hop_ck[I2S_IN] = devm_clk_get(afe->dev, name);
+		if (IS_ERR(i2s_path->hop_ck[I2S_IN])) {
+			dev_err(afe->dev, "failed to get %s\n", name);
+			return PTR_ERR(i2s_path->hop_ck[I2S_IN]);
+		}
+
+		snprintf(name, sizeof(name), "asrc%d_out_ck", i);
+		i2s_path->asrco_ck = devm_clk_get(afe->dev, name);
+		if (IS_ERR(i2s_path->asrco_ck)) {
+			dev_err(afe->dev, "failed to get %s\n", name);
+			return PTR_ERR(i2s_path->asrco_ck);
 		}
 	}
 
+	/* Some platforms may support BT path */
+	afe_priv->mrgif_ck = devm_clk_get(afe->dev, "audio_mrgif_pd");
+	if (IS_ERR(afe_priv->mrgif_ck)) {
+		if (PTR_ERR(afe_priv->mrgif_ck) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+
+		afe_priv->mrgif_ck = NULL;
+	}
+
 	return 0;
 }
 
-int mt2701_afe_enable_clock(struct mtk_base_afe *afe)
+int mt2701_afe_enable_i2s(struct mtk_base_afe *afe, int id, int dir)
 {
-	int ret = 0;
+	struct mt2701_afe_private *afe_priv = afe->platform_priv;
+	struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[id];
+	int ret;
 
-	ret = mt2701_turn_on_a1sys_clock(afe);
+	ret = clk_prepare_enable(i2s_path->asrco_ck);
 	if (ret) {
-		dev_err(afe->dev, "%s turn_on_a1sys_clock fail %d\n",
-			__func__, ret);
+		dev_err(afe->dev, "failed to enable ASRC clock %d\n", ret);
 		return ret;
 	}
 
-	ret = mt2701_turn_on_a2sys_clock(afe);
+	ret = clk_prepare_enable(i2s_path->hop_ck[dir]);
 	if (ret) {
-		dev_err(afe->dev, "%s turn_on_a2sys_clock fail %d\n",
-			__func__, ret);
-		mt2701_turn_off_a1sys_clock(afe);
-		return ret;
+		dev_err(afe->dev, "failed to enable I2S clock %d\n", ret);
+		goto err_hop_ck;
 	}
 
-	ret = mt2701_turn_on_afe_clock(afe);
-	if (ret) {
-		dev_err(afe->dev, "%s turn_on_afe_clock fail %d\n",
-			__func__, ret);
-		mt2701_turn_off_a1sys_clock(afe);
-		mt2701_turn_off_a2sys_clock(afe);
-		return ret;
-	}
+	return 0;
 
-	regmap_update_bits(afe->regmap, ASYS_TOP_CON,
-			   AUDIO_TOP_CON0_A1SYS_A2SYS_ON,
-			   AUDIO_TOP_CON0_A1SYS_A2SYS_ON);
-	regmap_update_bits(afe->regmap, AFE_DAC_CON0,
-			   AFE_DAC_CON0_AFE_ON,
-			   AFE_DAC_CON0_AFE_ON);
-	regmap_write(afe->regmap, PWR2_TOP_CON,
-		     PWR2_TOP_CON_INIT_VAL);
-	regmap_write(afe->regmap, PWR1_ASM_CON1,
-		     PWR1_ASM_CON1_INIT_VAL);
-	regmap_write(afe->regmap, PWR2_ASM_CON1,
-		     PWR2_ASM_CON1_INIT_VAL);
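+	/* Enabling hop_ck failed: release the ASRC clock taken above */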
+err_hop_ck:
+	clk_disable_unprepare(i2s_path->asrco_ck);
 
-	return 0;
+	return ret;
 }
 
-void mt2701_afe_disable_clock(struct mtk_base_afe *afe)
+void mt2701_afe_disable_i2s(struct mtk_base_afe *afe, int id, int dir)
 {
-	mt2701_turn_off_afe_clock(afe);
-	mt2701_turn_off_a1sys_clock(afe);
-	mt2701_turn_off_a2sys_clock(afe);
-	regmap_update_bits(afe->regmap, ASYS_TOP_CON,
-			   AUDIO_TOP_CON0_A1SYS_A2SYS_ON, 0);
-	regmap_update_bits(afe->regmap, AFE_DAC_CON0,
-			   AFE_DAC_CON0_AFE_ON, 0);
+	struct mt2701_afe_private *afe_priv = afe->platform_priv;
+	struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[id];
+
+	clk_disable_unprepare(i2s_path->hop_ck[dir]);
+	clk_disable_unprepare(i2s_path->asrco_ck);
 }
 
-int mt2701_turn_on_a1sys_clock(struct mtk_base_afe *afe)
+int mt2701_afe_enable_mclk(struct mtk_base_afe *afe, int id)
 {
 	struct mt2701_afe_private *afe_priv = afe->platform_priv;
-	int ret = 0;
-
-	/* Set Mux */
-	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
-	if (ret) {
-		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-			__func__, aud_clks[MT2701_AUD_AUD_MUX1_SEL], ret);
-		goto A1SYS_CLK_AUD_MUX1_SEL_ERR;
-	}
-
-	ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL],
-			     afe_priv->clocks[MT2701_AUD_AUD1PLL_98M]);
-	if (ret) {
-		dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
-			aud_clks[MT2701_AUD_AUD_MUX1_SEL],
-			aud_clks[MT2701_AUD_AUD1PLL_98M], ret);
-		goto A1SYS_CLK_AUD_MUX1_SEL_ERR;
-	}
-
-	/* Set Divider */
-	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
-	if (ret) {
-		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-			__func__,
-			aud_clks[MT2701_AUD_AUD_MUX1_DIV],
-			ret);
-		goto A1SYS_CLK_AUD_MUX1_DIV_ERR;
-	}
-
-	ret = clk_set_rate(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV],
-			   MT2701_AUD_AUD_MUX1_DIV_RATE);
-	if (ret) {
-		dev_err(afe->dev, "%s clk_set_parent %s-%d fail %d\n", __func__,
-			aud_clks[MT2701_AUD_AUD_MUX1_DIV],
-			MT2701_AUD_AUD_MUX1_DIV_RATE, ret);
-		goto A1SYS_CLK_AUD_MUX1_DIV_ERR;
-	}
+	struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[id];
 
-	/* Enable clock gate */
-	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
-	if (ret) {
-		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-			__func__, aud_clks[MT2701_AUD_AUD_48K_TIMING], ret);
-		goto A1SYS_CLK_AUD_48K_ERR;
-	}
+	return clk_prepare_enable(i2s_path->mclk_ck);
+}
 
-	/* Enable infra audio */
-	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-	if (ret) {
-		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-			__func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
-		goto A1SYS_CLK_INFRA_ERR;
-	}
+void mt2701_afe_disable_mclk(struct mtk_base_afe *afe, int id)
+{
+	struct mt2701_afe_private *afe_priv = afe->platform_priv;
+	struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[id];
 
-	return 0;
+	clk_disable_unprepare(i2s_path->mclk_ck);
+}
 
-A1SYS_CLK_INFRA_ERR:
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-A1SYS_CLK_AUD_48K_ERR:
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
-A1SYS_CLK_AUD_MUX1_DIV_ERR:
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
-A1SYS_CLK_AUD_MUX1_SEL_ERR:
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
+int mt2701_enable_btmrg_clk(struct mtk_base_afe *afe)
+{
+	struct mt2701_afe_private *afe_priv = afe->platform_priv;
 
-	return ret;
+	return clk_prepare_enable(afe_priv->mrgif_ck);
 }
 
-void mt2701_turn_off_a1sys_clock(struct mtk_base_afe *afe)
+void mt2701_disable_btmrg_clk(struct mtk_base_afe *afe)
 {
 	struct mt2701_afe_private *afe_priv = afe->platform_priv;
 
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
+	clk_disable_unprepare(afe_priv->mrgif_ck);
 }
 
-int mt2701_turn_on_a2sys_clock(struct mtk_base_afe *afe)
+static int mt2701_afe_enable_audsys(struct mtk_base_afe *afe)
 {
 	struct mt2701_afe_private *afe_priv = afe->platform_priv;
-	int ret = 0;
+	int ret;
 
-	/* Set Mux */
-	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
-	if (ret) {
-		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-			__func__, aud_clks[MT2701_AUD_AUD_MUX2_SEL], ret);
-		goto A2SYS_CLK_AUD_MUX2_SEL_ERR;
-	}
+	/* Enable infra clock gate */
+	ret = clk_prepare_enable(afe_priv->base_ck[MT2701_INFRA_SYS_AUDIO]);
+	if (ret)
+		return ret;
 
-	ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL],
-			     afe_priv->clocks[MT2701_AUD_AUD2PLL_90M]);
-	if (ret) {
-		dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
-			aud_clks[MT2701_AUD_AUD_MUX2_SEL],
-			aud_clks[MT2701_AUD_AUD2PLL_90M], ret);
-		goto A2SYS_CLK_AUD_MUX2_SEL_ERR;
-	}
+	/* Enable top a1sys clock gate */
+	ret = clk_prepare_enable(afe_priv->base_ck[MT2701_TOP_AUD_A1SYS]);
+	if (ret)
+		goto err_a1sys;
 
-	/* Set Divider */
-	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
-	if (ret) {
-		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-			__func__, aud_clks[MT2701_AUD_AUD_MUX2_DIV], ret);
-		goto A2SYS_CLK_AUD_MUX2_DIV_ERR;
-	}
+	/* Enable top a2sys clock gate */
+	ret = clk_prepare_enable(afe_priv->base_ck[MT2701_TOP_AUD_A2SYS]);
+	if (ret)
+		goto err_a2sys;
 
-	ret = clk_set_rate(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV],
-			   MT2701_AUD_AUD_MUX2_DIV_RATE);
-	if (ret) {
-		dev_err(afe->dev, "%s clk_set_parent %s-%d fail %d\n", __func__,
-			aud_clks[MT2701_AUD_AUD_MUX2_DIV],
-			MT2701_AUD_AUD_MUX2_DIV_RATE, ret);
-		goto A2SYS_CLK_AUD_MUX2_DIV_ERR;
-	}
+	/* Internal clock gates */
+	ret = clk_prepare_enable(afe_priv->base_ck[MT2701_AUDSYS_AFE]);
+	if (ret)
+		goto err_afe;
 
-	/* Enable clock gate */
-	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
-	if (ret) {
-		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-			__func__, aud_clks[MT2701_AUD_AUD_44K_TIMING], ret);
-		goto A2SYS_CLK_AUD_44K_ERR;
-	}
+	ret = clk_prepare_enable(afe_priv->base_ck[MT2701_AUDSYS_A1SYS]);
+	if (ret)
+		goto err_audio_a1sys;
 
-	/* Enable infra audio */
-	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-	if (ret) {
-		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-			__func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
-		goto A2SYS_CLK_INFRA_ERR;
-	}
+	ret = clk_prepare_enable(afe_priv->base_ck[MT2701_AUDSYS_A2SYS]);
+	if (ret)
+		goto err_audio_a2sys;
+
+	ret = clk_prepare_enable(afe_priv->base_ck[MT2701_AUDSYS_AFE_CONN]);
+	if (ret)
+		goto err_afe_conn;
 
 	return 0;
 
-A2SYS_CLK_INFRA_ERR:
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-A2SYS_CLK_AUD_44K_ERR:
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
-A2SYS_CLK_AUD_MUX2_DIV_ERR:
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
-A2SYS_CLK_AUD_MUX2_SEL_ERR:
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
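+	/* Unwind the clock enables above in reverse order */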
+err_afe_conn:
+	clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_A2SYS]);
+err_audio_a2sys:
+	clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_A1SYS]);
+err_audio_a1sys:
+	clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_AFE]);
+err_afe:
+	clk_disable_unprepare(afe_priv->base_ck[MT2701_TOP_AUD_A2SYS]);
+err_a2sys:
+	clk_disable_unprepare(afe_priv->base_ck[MT2701_TOP_AUD_A1SYS]);
+err_a1sys:
+	clk_disable_unprepare(afe_priv->base_ck[MT2701_INFRA_SYS_AUDIO]);
 
 	return ret;
 }
 
-void mt2701_turn_off_a2sys_clock(struct mtk_base_afe *afe)
+static void mt2701_afe_disable_audsys(struct mtk_base_afe *afe)
 {
 	struct mt2701_afe_private *afe_priv = afe->platform_priv;
 
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
+	clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_AFE_CONN]);
+	clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_A2SYS]);
+	clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_A1SYS]);
+	clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_AFE]);
+	clk_disable_unprepare(afe_priv->base_ck[MT2701_TOP_AUD_A1SYS]);
+	clk_disable_unprepare(afe_priv->base_ck[MT2701_TOP_AUD_A2SYS]);
+	clk_disable_unprepare(afe_priv->base_ck[MT2701_INFRA_SYS_AUDIO]);
 }
 
-int mt2701_turn_on_afe_clock(struct mtk_base_afe *afe)
+int mt2701_afe_enable_clock(struct mtk_base_afe *afe)
 {
-	struct mt2701_afe_private *afe_priv = afe->platform_priv;
 	int ret;
 
-	/* enable INFRA_SYS */
-	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-	if (ret) {
-		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-			__func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
-		goto AFE_AUD_INFRA_ERR;
-	}
-
-	/* Set MT2701_AUD_AUDINTBUS to MT2701_AUD_SYSPLL1_D4 */
-	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
-	if (ret) {
-		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-			__func__, aud_clks[MT2701_AUD_AUDINTBUS], ret);
-		goto AFE_AUD_AUDINTBUS_ERR;
-	}
-
-	ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUDINTBUS],
-			     afe_priv->clocks[MT2701_AUD_SYSPLL1_D4]);
-	if (ret) {
-		dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
-			aud_clks[MT2701_AUD_AUDINTBUS],
-			aud_clks[MT2701_AUD_SYSPLL1_D4], ret);
-		goto AFE_AUD_AUDINTBUS_ERR;
-	}
-
-	/* Set MT2701_AUD_ASM_H_SEL to MT2701_AUD_UNIVPLL2_D2 */
-	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
+	/* Enable audio system */
+	ret = mt2701_afe_enable_audsys(afe);
 	if (ret) {
-		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-			__func__, aud_clks[MT2701_AUD_ASM_H_SEL], ret);
-		goto AFE_AUD_ASM_H_ERR;
-	}
-
-	ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_ASM_H_SEL],
-			     afe_priv->clocks[MT2701_AUD_UNIVPLL2_D2]);
-	if (ret) {
-		dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
-			aud_clks[MT2701_AUD_ASM_H_SEL],
-			aud_clks[MT2701_AUD_UNIVPLL2_D2], ret);
-		goto AFE_AUD_ASM_H_ERR;
-	}
-
-	/* Set MT2701_AUD_ASM_M_SEL to MT2701_AUD_UNIVPLL2_D4 */
-	ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
-	if (ret) {
-		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-			__func__, aud_clks[MT2701_AUD_ASM_M_SEL], ret);
-		goto AFE_AUD_ASM_M_ERR;
+		dev_err(afe->dev, "failed to enable audio system %d\n", ret);
+		return ret;
 	}
 
-	ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_ASM_M_SEL],
-			     afe_priv->clocks[MT2701_AUD_UNIVPLL2_D4]);
-	if (ret) {
-		dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
-			aud_clks[MT2701_AUD_ASM_M_SEL],
-			aud_clks[MT2701_AUD_UNIVPLL2_D4], ret);
-		goto AFE_AUD_ASM_M_ERR;
-	}
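+	/* Start ASYS timing and switch the AFE on */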
+	regmap_update_bits(afe->regmap, ASYS_TOP_CON,
+			   ASYS_TOP_CON_ASYS_TIMING_ON,
+			   ASYS_TOP_CON_ASYS_TIMING_ON);
+	regmap_update_bits(afe->regmap, AFE_DAC_CON0,
+			   AFE_DAC_CON0_AFE_ON,
+			   AFE_DAC_CON0_AFE_ON);
 
-	regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
-			   AUDIO_TOP_CON0_PDN_AFE, 0);
-	regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
-			   AUDIO_TOP_CON0_PDN_APLL_CK, 0);
-	regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-			   AUDIO_TOP_CON4_PDN_A1SYS, 0);
-	regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-			   AUDIO_TOP_CON4_PDN_A2SYS, 0);
-	regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-			   AUDIO_TOP_CON4_PDN_AFE_CONN, 0);
+	/* Configure ASRC */
+	regmap_write(afe->regmap, PWR1_ASM_CON1, PWR1_ASM_CON1_INIT_VAL);
+	regmap_write(afe->regmap, PWR2_ASM_CON1, PWR2_ASM_CON1_INIT_VAL);
 
 	return 0;
-
-AFE_AUD_ASM_M_ERR:
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
-AFE_AUD_ASM_H_ERR:
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
-AFE_AUD_AUDINTBUS_ERR:
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
-AFE_AUD_INFRA_ERR:
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-
-	return ret;
 }
 
-void mt2701_turn_off_afe_clock(struct mtk_base_afe *afe)
+int mt2701_afe_disable_clock(struct mtk_base_afe *afe)
 {
-	struct mt2701_afe_private *afe_priv = afe->platform_priv;
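+	/* Stop ASYS timing and the AFE before gating the audio system clocks */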
+	regmap_update_bits(afe->regmap, ASYS_TOP_CON,
+			   ASYS_TOP_CON_ASYS_TIMING_ON, 0);
+	regmap_update_bits(afe->regmap, AFE_DAC_CON0,
+			   AFE_DAC_CON0_AFE_ON, 0);
+
+	mt2701_afe_disable_audsys(afe);
 
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
-	clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
-
-	regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
-			   AUDIO_TOP_CON0_PDN_AFE, AUDIO_TOP_CON0_PDN_AFE);
-	regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
-			   AUDIO_TOP_CON0_PDN_APLL_CK,
-			   AUDIO_TOP_CON0_PDN_APLL_CK);
-	regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-			   AUDIO_TOP_CON4_PDN_A1SYS,
-			   AUDIO_TOP_CON4_PDN_A1SYS);
-	regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-			   AUDIO_TOP_CON4_PDN_A2SYS,
-			   AUDIO_TOP_CON4_PDN_A2SYS);
-	regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-			   AUDIO_TOP_CON4_PDN_AFE_CONN,
-			   AUDIO_TOP_CON4_PDN_AFE_CONN);
+	return 0;
 }
 
 void mt2701_mclk_configuration(struct mtk_base_afe *afe, int id, int domain,
 			       int mclk)
 {
-	struct mt2701_afe_private *afe_priv = afe->platform_priv;
+	struct mt2701_afe_private *priv = afe->platform_priv;
+	struct mt2701_i2s_path *i2s_path = &priv->i2s_path[id];
 	int ret;
-	int aud_src_div_id = MT2701_AUD_AUD_K1_SRC_DIV + id;
-	int aud_src_clk_id = MT2701_AUD_AUD_K1_SRC_SEL + id;
 
-	/* Set MCLK Kx_SRC_SEL(domain) */
-	ret = clk_prepare_enable(afe_priv->clocks[aud_src_clk_id]);
-	if (ret)
-		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-			__func__, aud_clks[aud_src_clk_id], ret);
-
-	if (domain == 0) {
-		ret = clk_set_parent(afe_priv->clocks[aud_src_clk_id],
-				     afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
-		if (ret)
-			dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
-				__func__, aud_clks[aud_src_clk_id],
-				aud_clks[MT2701_AUD_AUD_MUX1_SEL], ret);
-	} else {
-		ret = clk_set_parent(afe_priv->clocks[aud_src_clk_id],
-				     afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
-		if (ret)
-			dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
-				__func__, aud_clks[aud_src_clk_id],
-				aud_clks[MT2701_AUD_AUD_MUX2_SEL], ret);
-	}
-	clk_disable_unprepare(afe_priv->clocks[aud_src_clk_id]);
+	/* Set mclk source */
+	if (domain == 0)
+		ret = clk_set_parent(i2s_path->sel_ck,
+				     priv->base_ck[MT2701_TOP_AUD_MCLK_SRC0]);
+	else
+		ret = clk_set_parent(i2s_path->sel_ck,
+				     priv->base_ck[MT2701_TOP_AUD_MCLK_SRC1]);
 
-	/* Set MCLK Kx_SRC_DIV(divider) */
-	ret = clk_prepare_enable(afe_priv->clocks[aud_src_div_id]);
 	if (ret)
-		dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
-			__func__, aud_clks[aud_src_div_id], ret);
+		dev_err(afe->dev, "failed to set domain%d mclk source %d\n",
+			domain, ret);
 
-	ret = clk_set_rate(afe_priv->clocks[aud_src_div_id], mclk);
+	/* Set mclk divider */
+	ret = clk_set_rate(i2s_path->div_ck, mclk);
 	if (ret)
-		dev_err(afe->dev, "%s clk_set_rate %s-%d fail %d\n", __func__,
-			aud_clks[aud_src_div_id], mclk, ret);
-	clk_disable_unprepare(afe_priv->clocks[aud_src_div_id]);
+		dev_err(afe->dev, "failed to set mclk divider %d\n", ret);
 }
-
-MODULE_DESCRIPTION("MT2701 afe clock control");
-MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.h b/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.h
index 6497d570cf097df78ef529157f3009e447b70213..15417d9d6597879d9b567f11c3fa0179267747f1 100644
--- a/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.h
+++ b/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.h
@@ -21,16 +21,15 @@ struct mtk_base_afe;
 
 int mt2701_init_clock(struct mtk_base_afe *afe);
 int mt2701_afe_enable_clock(struct mtk_base_afe *afe);
-void mt2701_afe_disable_clock(struct mtk_base_afe *afe);
+int mt2701_afe_disable_clock(struct mtk_base_afe *afe);
 
-int mt2701_turn_on_a1sys_clock(struct mtk_base_afe *afe);
-void mt2701_turn_off_a1sys_clock(struct mtk_base_afe *afe);
+int mt2701_afe_enable_i2s(struct mtk_base_afe *afe, int id, int dir);
+void mt2701_afe_disable_i2s(struct mtk_base_afe *afe, int id, int dir);
+int mt2701_afe_enable_mclk(struct mtk_base_afe *afe, int id);
+void mt2701_afe_disable_mclk(struct mtk_base_afe *afe, int id);
 
-int mt2701_turn_on_a2sys_clock(struct mtk_base_afe *afe);
-void mt2701_turn_off_a2sys_clock(struct mtk_base_afe *afe);
-
-int mt2701_turn_on_afe_clock(struct mtk_base_afe *afe);
-void mt2701_turn_off_afe_clock(struct mtk_base_afe *afe);
+int mt2701_enable_btmrg_clk(struct mtk_base_afe *afe);
+void mt2701_disable_btmrg_clk(struct mtk_base_afe *afe);
 
 void mt2701_mclk_configuration(struct mtk_base_afe *afe, int id, int domain,
 			       int mclk);
diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-common.h b/sound/soc/mediatek/mt2701/mt2701-afe-common.h
index c19430e98adf172bd71476294030fef3e4a75f76..ae8ddeacfbfe25584c2e9c6a2e6133a959f754bf 100644
--- a/sound/soc/mediatek/mt2701/mt2701-afe-common.h
+++ b/sound/soc/mediatek/mt2701/mt2701-afe-common.h
@@ -16,6 +16,7 @@
 
 #ifndef _MT_2701_AFE_COMMON_H_
 #define _MT_2701_AFE_COMMON_H_
+
 #include <sound/soc.h>
 #include <linux/clk.h>
 #include <linux/regmap.h>
@@ -25,16 +26,7 @@
 #define MT2701_STREAM_DIR_NUM (SNDRV_PCM_STREAM_LAST + 1)
 #define MT2701_PLL_DOMAIN_0_RATE	98304000
 #define MT2701_PLL_DOMAIN_1_RATE	90316800
-#define MT2701_AUD_AUD_MUX1_DIV_RATE (MT2701_PLL_DOMAIN_0_RATE / 2)
-#define MT2701_AUD_AUD_MUX2_DIV_RATE (MT2701_PLL_DOMAIN_1_RATE / 2)
-
-enum {
-	MT2701_I2S_1,
-	MT2701_I2S_2,
-	MT2701_I2S_3,
-	MT2701_I2S_4,
-	MT2701_I2S_NUM,
-};
+#define MT2701_I2S_NUM	4
 
 enum {
 	MT2701_MEMIF_DL1,
@@ -62,60 +54,23 @@ enum {
 };
 
 enum {
-	MT2701_IRQ_ASYS_START,
-	MT2701_IRQ_ASYS_IRQ1 = MT2701_IRQ_ASYS_START,
+	MT2701_IRQ_ASYS_IRQ1,
 	MT2701_IRQ_ASYS_IRQ2,
 	MT2701_IRQ_ASYS_IRQ3,
 	MT2701_IRQ_ASYS_END,
 };
 
-/* 2701 clock def */
-enum audio_system_clock_type {
-	MT2701_AUD_INFRA_SYS_AUDIO,
-	MT2701_AUD_AUD_MUX1_SEL,
-	MT2701_AUD_AUD_MUX2_SEL,
-	MT2701_AUD_AUD_MUX1_DIV,
-	MT2701_AUD_AUD_MUX2_DIV,
-	MT2701_AUD_AUD_48K_TIMING,
-	MT2701_AUD_AUD_44K_TIMING,
-	MT2701_AUD_AUDPLL_MUX_SEL,
-	MT2701_AUD_APLL_SEL,
-	MT2701_AUD_AUD1PLL_98M,
-	MT2701_AUD_AUD2PLL_90M,
-	MT2701_AUD_HADDS2PLL_98M,
-	MT2701_AUD_HADDS2PLL_294M,
-	MT2701_AUD_AUDPLL,
-	MT2701_AUD_AUDPLL_D4,
-	MT2701_AUD_AUDPLL_D8,
-	MT2701_AUD_AUDPLL_D16,
-	MT2701_AUD_AUDPLL_D24,
-	MT2701_AUD_AUDINTBUS,
-	MT2701_AUD_CLK_26M,
-	MT2701_AUD_SYSPLL1_D4,
-	MT2701_AUD_AUD_K1_SRC_SEL,
-	MT2701_AUD_AUD_K2_SRC_SEL,
-	MT2701_AUD_AUD_K3_SRC_SEL,
-	MT2701_AUD_AUD_K4_SRC_SEL,
-	MT2701_AUD_AUD_K5_SRC_SEL,
-	MT2701_AUD_AUD_K6_SRC_SEL,
-	MT2701_AUD_AUD_K1_SRC_DIV,
-	MT2701_AUD_AUD_K2_SRC_DIV,
-	MT2701_AUD_AUD_K3_SRC_DIV,
-	MT2701_AUD_AUD_K4_SRC_DIV,
-	MT2701_AUD_AUD_K5_SRC_DIV,
-	MT2701_AUD_AUD_K6_SRC_DIV,
-	MT2701_AUD_AUD_I2S1_MCLK,
-	MT2701_AUD_AUD_I2S2_MCLK,
-	MT2701_AUD_AUD_I2S3_MCLK,
-	MT2701_AUD_AUD_I2S4_MCLK,
-	MT2701_AUD_AUD_I2S5_MCLK,
-	MT2701_AUD_AUD_I2S6_MCLK,
-	MT2701_AUD_ASM_M_SEL,
-	MT2701_AUD_ASM_H_SEL,
-	MT2701_AUD_UNIVPLL2_D4,
-	MT2701_AUD_UNIVPLL2_D2,
-	MT2701_AUD_SYSPLL_D5,
-	MT2701_CLOCK_NUM
+enum audio_base_clock {
+	MT2701_INFRA_SYS_AUDIO,
+	MT2701_TOP_AUD_MCLK_SRC0,
+	MT2701_TOP_AUD_MCLK_SRC1,
+	MT2701_TOP_AUD_A1SYS,
+	MT2701_TOP_AUD_A2SYS,
+	MT2701_AUDSYS_AFE,
+	MT2701_AUDSYS_AFE_CONN,
+	MT2701_AUDSYS_A1SYS,
+	MT2701_AUDSYS_A2SYS,
+	MT2701_BASE_CLK_NUM,
 };
 
 static const unsigned int mt2701_afe_backup_list[] = {
@@ -139,12 +94,8 @@ static const unsigned int mt2701_afe_backup_list[] = {
 	AFE_MEMIF_PBUF_SIZE,
 };
 
-struct snd_pcm_substream;
-struct mtk_base_irq_data;
-
 struct mt2701_i2s_data {
 	int i2s_ctrl_reg;
-	int i2s_pwn_shift;
 	int i2s_asrc_fs_shift;
 	int i2s_asrc_fs_mask;
 };
@@ -160,12 +111,18 @@ struct mt2701_i2s_path {
 	int mclk_rate;
 	int on[I2S_DIR_NUM];
 	int occupied[I2S_DIR_NUM];
-	const struct mt2701_i2s_data *i2s_data[2];
+	const struct mt2701_i2s_data *i2s_data[I2S_DIR_NUM];
+	struct clk *hop_ck[I2S_DIR_NUM];
+	struct clk *sel_ck;
+	struct clk *div_ck;
+	struct clk *mclk_ck;
+	struct clk *asrco_ck;
 };
 
 struct mt2701_afe_private {
-	struct clk *clocks[MT2701_CLOCK_NUM];
 	struct mt2701_i2s_path i2s_path[MT2701_I2S_NUM];
+	struct clk *base_ck[MT2701_BASE_CLK_NUM];
+	struct clk *mrgif_ck;
 	bool mrg_enable[MT2701_STREAM_DIR_NUM];
 };
 
diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c b/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
index 8fda182f849b1758b776192752f1654add8ab2e7..f0cd08fa5c5dec32bbe0734395d23a1e1ef4b61a 100644
--- a/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
+++ b/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
@@ -17,19 +17,16 @@
 
 #include <linux/delay.h>
 #include <linux/module.h>
+#include <linux/mfd/syscon.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/pm_runtime.h>
-#include <sound/soc.h>
 
 #include "mt2701-afe-common.h"
-
 #include "mt2701-afe-clock-ctrl.h"
 #include "../common/mtk-afe-platform-driver.h"
 #include "../common/mtk-afe-fe-dai.h"
 
-#define AFE_IRQ_STATUS_BITS	0xff
-
 static const struct snd_pcm_hardware mt2701_afe_hardware = {
 	.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED
 		| SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID,
@@ -97,40 +94,26 @@ static int mt2701_afe_i2s_startup(struct snd_pcm_substream *substream,
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
-	struct mt2701_afe_private *afe_priv = afe->platform_priv;
 	int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
-	int clk_num = MT2701_AUD_AUD_I2S1_MCLK + i2s_num;
-	int ret = 0;
 
 	if (i2s_num < 0)
 		return i2s_num;
 
-	/* enable mclk */
-	ret = clk_prepare_enable(afe_priv->clocks[clk_num]);
-	if (ret)
-		dev_err(afe->dev, "Failed to enable mclk for I2S: %d\n",
-			i2s_num);
-
-	return ret;
+	return mt2701_afe_enable_mclk(afe, i2s_num);
 }
 
 static int mt2701_afe_i2s_path_shutdown(struct snd_pcm_substream *substream,
 					struct snd_soc_dai *dai,
+					int i2s_num,
 					int dir_invert)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
 	struct mt2701_afe_private *afe_priv = afe->platform_priv;
-	int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
-	struct mt2701_i2s_path *i2s_path;
+	struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[i2s_num];
 	const struct mt2701_i2s_data *i2s_data;
 	int stream_dir = substream->stream;
 
-	if (i2s_num < 0)
-		return i2s_num;
-
-	i2s_path = &afe_priv->i2s_path[i2s_num];
-
 	if (dir_invert)	{
 		if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK)
 			stream_dir = SNDRV_PCM_STREAM_CAPTURE;
@@ -151,9 +134,9 @@ static int mt2701_afe_i2s_path_shutdown(struct snd_pcm_substream *substream,
 	/* disable i2s */
 	regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg,
 			   ASYS_I2S_CON_I2S_EN, 0);
-	regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-			   1 << i2s_data->i2s_pwn_shift,
-			   1 << i2s_data->i2s_pwn_shift);
+
+	mt2701_afe_disable_i2s(afe, i2s_num, stream_dir);
+
 	return 0;
 }
 
@@ -165,7 +148,6 @@ static void mt2701_afe_i2s_shutdown(struct snd_pcm_substream *substream,
 	struct mt2701_afe_private *afe_priv = afe->platform_priv;
 	int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
 	struct mt2701_i2s_path *i2s_path;
-	int clk_num = MT2701_AUD_AUD_I2S1_MCLK + i2s_num;
 
 	if (i2s_num < 0)
 		return;
@@ -177,37 +159,32 @@ static void mt2701_afe_i2s_shutdown(struct snd_pcm_substream *substream,
 	else
 		goto I2S_UNSTART;
 
-	mt2701_afe_i2s_path_shutdown(substream, dai, 0);
+	mt2701_afe_i2s_path_shutdown(substream, dai, i2s_num, 0);
 
 	/* need to disable i2s-out path when disable i2s-in */
 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-		mt2701_afe_i2s_path_shutdown(substream, dai, 1);
+		mt2701_afe_i2s_path_shutdown(substream, dai, i2s_num, 1);
 
 I2S_UNSTART:
 	/* disable mclk */
-	clk_disable_unprepare(afe_priv->clocks[clk_num]);
+	mt2701_afe_disable_mclk(afe, i2s_num);
 }
 
 static int mt2701_i2s_path_prepare_enable(struct snd_pcm_substream *substream,
 					  struct snd_soc_dai *dai,
+					  int i2s_num,
 					  int dir_invert)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
 	struct mt2701_afe_private *afe_priv = afe->platform_priv;
-	int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
-	struct mt2701_i2s_path *i2s_path;
+	struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[i2s_num];
 	const struct mt2701_i2s_data *i2s_data;
 	struct snd_pcm_runtime * const runtime = substream->runtime;
 	int reg, fs, w_len = 1; /* now we support bck 64bits only */
 	int stream_dir = substream->stream;
 	unsigned int mask = 0, val = 0;
 
-	if (i2s_num < 0)
-		return i2s_num;
-
-	i2s_path = &afe_priv->i2s_path[i2s_num];
-
 	if (dir_invert) {
 		if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK)
 			stream_dir = SNDRV_PCM_STREAM_CAPTURE;
@@ -251,9 +228,7 @@ static int mt2701_i2s_path_prepare_enable(struct snd_pcm_substream *substream,
 			   fs << i2s_data->i2s_asrc_fs_shift);
 
 	/* enable i2s */
-	regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-			   1 << i2s_data->i2s_pwn_shift,
-			   0 << i2s_data->i2s_pwn_shift);
+	mt2701_afe_enable_i2s(afe, i2s_num, stream_dir);
 
 	/* reset i2s hw status before enable */
 	regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg,
@@ -300,13 +275,13 @@ static int mt2701_afe_i2s_prepare(struct snd_pcm_substream *substream,
 	mt2701_mclk_configuration(afe, i2s_num, clk_domain, mclk_rate);
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		mt2701_i2s_path_prepare_enable(substream, dai, 0);
+		mt2701_i2s_path_prepare_enable(substream, dai, i2s_num, 0);
 	} else {
 		/* need to enable i2s-out path when enable i2s-in */
 		/* prepare for another direction "out" */
-		mt2701_i2s_path_prepare_enable(substream, dai, 1);
+		mt2701_i2s_path_prepare_enable(substream, dai, i2s_num, 1);
 		/* prepare for "in" */
-		mt2701_i2s_path_prepare_enable(substream, dai, 0);
+		mt2701_i2s_path_prepare_enable(substream, dai, i2s_num, 0);
 	}
 
 	return 0;
@@ -339,9 +314,11 @@ static int mt2701_btmrg_startup(struct snd_pcm_substream *substream,
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
 	struct mt2701_afe_private *afe_priv = afe->platform_priv;
+	int ret;
 
-	regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-			   AUDIO_TOP_CON4_PDN_MRGIF, 0);
+	ret = mt2701_enable_btmrg_clk(afe);
+	if (ret)
+		return ret;
 
 	afe_priv->mrg_enable[substream->stream] = 1;
 	return 0;
@@ -406,9 +383,7 @@ static void mt2701_btmrg_shutdown(struct snd_pcm_substream *substream,
 				   AFE_MRGIF_CON_MRG_EN, 0);
 		regmap_update_bits(afe->regmap, AFE_MRGIF_CON,
 				   AFE_MRGIF_CON_MRG_I2S_EN, 0);
-		regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
-				   AUDIO_TOP_CON4_PDN_MRGIF,
-				   AUDIO_TOP_CON4_PDN_MRGIF);
+		mt2701_disable_btmrg_clk(afe);
 	}
 	afe_priv->mrg_enable[substream->stream] = 0;
 }
@@ -574,7 +549,6 @@ static const struct snd_soc_dai_ops mt2701_single_memif_dai_ops = {
 	.hw_free	= mtk_afe_fe_hw_free,
 	.prepare	= mtk_afe_fe_prepare,
 	.trigger	= mtk_afe_fe_trigger,
-
 };
 
 static const struct snd_soc_dai_ops mt2701_dlm_memif_dai_ops = {
@@ -915,31 +889,6 @@ static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_i2s4[] = {
 				    PWR2_TOP_CON, 19, 1, 0),
 };
 
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc0[] = {
-	SOC_DAPM_SINGLE_AUTODISABLE("Asrc0 out Switch", AUDIO_TOP_CON4, 14, 1,
-				    1),
-};
-
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc1[] = {
-	SOC_DAPM_SINGLE_AUTODISABLE("Asrc1 out Switch", AUDIO_TOP_CON4, 15, 1,
-				    1),
-};
-
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc2[] = {
-	SOC_DAPM_SINGLE_AUTODISABLE("Asrc2 out Switch", PWR2_TOP_CON, 6, 1,
-				    1),
-};
-
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc3[] = {
-	SOC_DAPM_SINGLE_AUTODISABLE("Asrc3 out Switch", PWR2_TOP_CON, 7, 1,
-				    1),
-};
-
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc4[] = {
-	SOC_DAPM_SINGLE_AUTODISABLE("Asrc4 out Switch", PWR2_TOP_CON, 8, 1,
-				    1),
-};
-
 static const struct snd_soc_dapm_widget mt2701_afe_pcm_widgets[] = {
 	/* inter-connections */
 	SND_SOC_DAPM_MIXER("I00", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -999,19 +948,6 @@ static const struct snd_soc_dapm_widget mt2701_afe_pcm_widgets[] = {
 	SND_SOC_DAPM_MIXER("I18I19", SND_SOC_NOPM, 0, 0,
 			   mt2701_afe_multi_ch_out_i2s3,
 			   ARRAY_SIZE(mt2701_afe_multi_ch_out_i2s3)),
-
-	SND_SOC_DAPM_MIXER("ASRC_O0", SND_SOC_NOPM, 0, 0,
-			   mt2701_afe_multi_ch_out_asrc0,
-			   ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc0)),
-	SND_SOC_DAPM_MIXER("ASRC_O1", SND_SOC_NOPM, 0, 0,
-			   mt2701_afe_multi_ch_out_asrc1,
-			   ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc1)),
-	SND_SOC_DAPM_MIXER("ASRC_O2", SND_SOC_NOPM, 0, 0,
-			   mt2701_afe_multi_ch_out_asrc2,
-			   ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc2)),
-	SND_SOC_DAPM_MIXER("ASRC_O3", SND_SOC_NOPM, 0, 0,
-			   mt2701_afe_multi_ch_out_asrc3,
-			   ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc3)),
 };
 
 static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
@@ -1021,7 +957,6 @@ static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
 
 	{"I2S0 Playback", NULL, "O15"},
 	{"I2S0 Playback", NULL, "O16"},
-
 	{"I2S1 Playback", NULL, "O17"},
 	{"I2S1 Playback", NULL, "O18"},
 	{"I2S2 Playback", NULL, "O19"},
@@ -1038,7 +973,6 @@ static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
 
 	{"I00", NULL, "I2S0 Capture"},
 	{"I01", NULL, "I2S0 Capture"},
-
 	{"I02", NULL, "I2S1 Capture"},
 	{"I03", NULL, "I2S1 Capture"},
 	/* I02,03 link to UL2, also need to open I2S0 */
@@ -1046,15 +980,10 @@ static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
 
 	{"I26", NULL, "BT Capture"},
 
-	{"ASRC_O0", "Asrc0 out Switch", "DLM"},
-	{"ASRC_O1", "Asrc1 out Switch", "DLM"},
-	{"ASRC_O2", "Asrc2 out Switch", "DLM"},
-	{"ASRC_O3", "Asrc3 out Switch", "DLM"},
-
-	{"I12I13", "Multich I2S0 Out Switch", "ASRC_O0"},
-	{"I14I15", "Multich I2S1 Out Switch", "ASRC_O1"},
-	{"I16I17", "Multich I2S2 Out Switch", "ASRC_O2"},
-	{"I18I19", "Multich I2S3 Out Switch", "ASRC_O3"},
+	{"I12I13", "Multich I2S0 Out Switch", "DLM"},
+	{"I14I15", "Multich I2S1 Out Switch", "DLM"},
+	{"I16I17", "Multich I2S2 Out Switch", "DLM"},
+	{"I18I19", "Multich I2S3 Out Switch", "DLM"},
 
 	{ "I12", NULL, "I12I13" },
 	{ "I13", NULL, "I12I13" },
@@ -1079,7 +1008,6 @@ static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
 	{ "O21", "I18 Switch", "I18" },
 	{ "O22", "I19 Switch", "I19" },
 	{ "O31", "I35 Switch", "I35" },
-
 };
 
 static const struct snd_soc_component_driver mt2701_afe_pcm_dai_component = {
@@ -1386,14 +1314,12 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
 	{
 		{
 			.i2s_ctrl_reg = ASYS_I2SO1_CON,
-			.i2s_pwn_shift = 6,
 			.i2s_asrc_fs_shift = 0,
 			.i2s_asrc_fs_mask = 0x1f,
 
 		},
 		{
 			.i2s_ctrl_reg = ASYS_I2SIN1_CON,
-			.i2s_pwn_shift = 0,
 			.i2s_asrc_fs_shift = 0,
 			.i2s_asrc_fs_mask = 0x1f,
 
@@ -1402,14 +1328,12 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
 	{
 		{
 			.i2s_ctrl_reg = ASYS_I2SO2_CON,
-			.i2s_pwn_shift = 7,
 			.i2s_asrc_fs_shift = 5,
 			.i2s_asrc_fs_mask = 0x1f,
 
 		},
 		{
 			.i2s_ctrl_reg = ASYS_I2SIN2_CON,
-			.i2s_pwn_shift = 1,
 			.i2s_asrc_fs_shift = 5,
 			.i2s_asrc_fs_mask = 0x1f,
 
@@ -1418,14 +1342,12 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
 	{
 		{
 			.i2s_ctrl_reg = ASYS_I2SO3_CON,
-			.i2s_pwn_shift = 8,
 			.i2s_asrc_fs_shift = 10,
 			.i2s_asrc_fs_mask = 0x1f,
 
 		},
 		{
 			.i2s_ctrl_reg = ASYS_I2SIN3_CON,
-			.i2s_pwn_shift = 2,
 			.i2s_asrc_fs_shift = 10,
 			.i2s_asrc_fs_mask = 0x1f,
 
@@ -1434,14 +1356,12 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
 	{
 		{
 			.i2s_ctrl_reg = ASYS_I2SO4_CON,
-			.i2s_pwn_shift = 9,
 			.i2s_asrc_fs_shift = 15,
 			.i2s_asrc_fs_mask = 0x1f,
 
 		},
 		{
 			.i2s_ctrl_reg = ASYS_I2SIN4_CON,
-			.i2s_pwn_shift = 3,
 			.i2s_asrc_fs_shift = 15,
 			.i2s_asrc_fs_mask = 0x1f,
 
@@ -1449,14 +1369,6 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
 	},
 };
 
-static const struct regmap_config mt2701_afe_regmap_config = {
-	.reg_bits = 32,
-	.reg_stride = 4,
-	.val_bits = 32,
-	.max_register = AFE_END_ADDR,
-	.cache_type = REGCACHE_NONE,
-};
-
 static irqreturn_t mt2701_asys_isr(int irq_id, void *dev)
 {
 	int id;
@@ -1483,8 +1395,7 @@ static int mt2701_afe_runtime_suspend(struct device *dev)
 {
 	struct mtk_base_afe *afe = dev_get_drvdata(dev);
 
-	mt2701_afe_disable_clock(afe);
-	return 0;
+	return mt2701_afe_disable_clock(afe);
 }
 
 static int mt2701_afe_runtime_resume(struct device *dev)
@@ -1496,21 +1407,22 @@ static int mt2701_afe_runtime_resume(struct device *dev)
 
 static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
 {
+	struct snd_soc_component *component;
 	struct mtk_base_afe *afe;
 	struct mt2701_afe_private *afe_priv;
-	struct resource *res;
 	struct device *dev;
 	int i, irq_id, ret;
 
 	afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL);
 	if (!afe)
 		return -ENOMEM;
+
 	afe->platform_priv = devm_kzalloc(&pdev->dev, sizeof(*afe_priv),
 					  GFP_KERNEL);
 	if (!afe->platform_priv)
 		return -ENOMEM;
-	afe_priv = afe->platform_priv;
 
+	afe_priv = afe->platform_priv;
 	afe->dev = &pdev->dev;
 	dev = afe->dev;
 
@@ -1527,17 +1439,11 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-	afe->base_addr = devm_ioremap_resource(&pdev->dev, res);
-
-	if (IS_ERR(afe->base_addr))
-		return PTR_ERR(afe->base_addr);
-
-	afe->regmap = devm_regmap_init_mmio(&pdev->dev, afe->base_addr,
-		&mt2701_afe_regmap_config);
-	if (IS_ERR(afe->regmap))
-		return PTR_ERR(afe->regmap);
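+	/* The register map is provided by the parent syscon node */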
+	afe->regmap = syscon_node_to_regmap(dev->parent->of_node);
+	if (!afe->regmap) {
+		dev_err(dev, "could not get regmap from parent\n");
+		return -ENODEV;
+	}
 
 	mutex_init(&afe->irq_alloc_lock);
 
@@ -1545,7 +1451,6 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
 	afe->memif_size = MT2701_MEMIF_NUM;
 	afe->memif = devm_kcalloc(dev, afe->memif_size, sizeof(*afe->memif),
 				  GFP_KERNEL);
-
 	if (!afe->memif)
 		return -ENOMEM;
 
@@ -1558,7 +1463,6 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
 	afe->irqs_size = MT2701_IRQ_ASYS_END;
 	afe->irqs = devm_kcalloc(dev, afe->irqs_size, sizeof(*afe->irqs),
 				 GFP_KERNEL);
-
 	if (!afe->irqs)
 		return -ENOMEM;
 
@@ -1573,10 +1477,15 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
 			= &mt2701_i2s_data[i][I2S_IN];
 	}
 
+	component = kzalloc(sizeof(*component), GFP_KERNEL);
+	if (!component)
+		return -ENOMEM;
+
+	component->regmap = afe->regmap;
+
 	afe->mtk_afe_hardware = &mt2701_afe_hardware;
 	afe->memif_fs = mt2701_memif_fs;
 	afe->irq_fs = mt2701_irq_fs;
-
 	afe->reg_back_up_list = mt2701_afe_backup_list;
 	afe->reg_back_up_list_num = ARRAY_SIZE(mt2701_afe_backup_list);
 	afe->runtime_resume = mt2701_afe_runtime_resume;
@@ -1586,59 +1495,58 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
 	ret = mt2701_init_clock(afe);
 	if (ret) {
 		dev_err(dev, "init clock error\n");
-		return ret;
+		goto err_init_clock;
 	}
 
 	platform_set_drvdata(pdev, afe);
-	pm_runtime_enable(&pdev->dev);
-	if (!pm_runtime_enabled(&pdev->dev))
-		goto err_pm_disable;
-	pm_runtime_get_sync(&pdev->dev);
 
-	ret = snd_soc_register_platform(&pdev->dev, &mtk_afe_pcm_platform);
+	pm_runtime_enable(dev);
+	if (!pm_runtime_enabled(dev)) {
+		ret = mt2701_afe_runtime_resume(dev);
+		if (ret)
+			goto err_pm_disable;
+	}
+	pm_runtime_get_sync(dev);
+
+	ret = snd_soc_register_platform(dev, &mtk_afe_pcm_platform);
 	if (ret) {
 		dev_warn(dev, "err_platform\n");
 		goto err_platform;
 	}
 
-	ret = snd_soc_register_component(&pdev->dev,
-					 &mt2701_afe_pcm_dai_component,
-					 mt2701_afe_pcm_dais,
-					 ARRAY_SIZE(mt2701_afe_pcm_dais));
+	ret = snd_soc_add_component(dev, component,
+				    &mt2701_afe_pcm_dai_component,
+				    mt2701_afe_pcm_dais,
+				    ARRAY_SIZE(mt2701_afe_pcm_dais));
 	if (ret) {
 		dev_warn(dev, "err_dai_component\n");
 		goto err_dai_component;
 	}
 
-	mt2701_afe_runtime_resume(&pdev->dev);
-
 	return 0;
 
 err_dai_component:
-	snd_soc_unregister_component(&pdev->dev);
-
+	snd_soc_unregister_platform(dev);
 err_platform:
-	snd_soc_unregister_platform(&pdev->dev);
-
+	pm_runtime_put_sync(dev);
 err_pm_disable:
-	pm_runtime_disable(&pdev->dev);
+	pm_runtime_disable(dev);
+err_init_clock:
+	kfree(component);
 
 	return ret;
 }
 
 static int mt2701_afe_pcm_dev_remove(struct platform_device *pdev)
 {
-	struct mtk_base_afe *afe = platform_get_drvdata(pdev);
-
+	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	if (!pm_runtime_status_suspended(&pdev->dev))
 		mt2701_afe_runtime_suspend(&pdev->dev);
-	pm_runtime_put_sync(&pdev->dev);
 
 	snd_soc_unregister_component(&pdev->dev);
 	snd_soc_unregister_platform(&pdev->dev);
-	/* disable afe clock */
-	mt2701_afe_disable_clock(afe);
+
 	return 0;
 }
 
@@ -1670,4 +1578,3 @@ module_platform_driver(mt2701_afe_pcm_driver);
 MODULE_DESCRIPTION("Mediatek ALSA SoC AFE platform driver for 2701");
 MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
 MODULE_LICENSE("GPL v2");
-
diff --git a/sound/soc/mediatek/mt2701/mt2701-reg.h b/sound/soc/mediatek/mt2701/mt2701-reg.h
index bb62b1c55957201e5952b4ea72a7932cd11bd927..18e676974f22cff17d41edb9ce456d605ef9450e 100644
--- a/sound/soc/mediatek/mt2701/mt2701-reg.h
+++ b/sound/soc/mediatek/mt2701/mt2701-reg.h
@@ -17,17 +17,6 @@
 #ifndef _MT2701_REG_H_
 #define _MT2701_REG_H_
 
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/pm_runtime.h>
-#include <sound/soc.h>
-#include "mt2701-afe-common.h"
-
-/*****************************************************************************
- *                  R E G I S T E R       D E F I N I T I O N
- *****************************************************************************/
 #define AUDIO_TOP_CON0 0x0000
 #define AUDIO_TOP_CON4 0x0010
 #define AUDIO_TOP_CON5 0x0014
@@ -109,18 +98,6 @@
 #define AFE_DAI_BASE 0x1370
 #define AFE_DAI_CUR 0x137c
 
-/* AUDIO_TOP_CON0 (0x0000) */
-#define AUDIO_TOP_CON0_A1SYS_A2SYS_ON	(0x3 << 0)
-#define AUDIO_TOP_CON0_PDN_AFE		(0x1 << 2)
-#define AUDIO_TOP_CON0_PDN_APLL_CK	(0x1 << 23)
-
-/* AUDIO_TOP_CON4 (0x0010) */
-#define AUDIO_TOP_CON4_I2SO1_PWN	(0x1 << 6)
-#define AUDIO_TOP_CON4_PDN_A1SYS	(0x1 << 21)
-#define AUDIO_TOP_CON4_PDN_A2SYS	(0x1 << 22)
-#define AUDIO_TOP_CON4_PDN_AFE_CONN	(0x1 << 23)
-#define AUDIO_TOP_CON4_PDN_MRGIF	(0x1 << 25)
-
 /* AFE_DAIBT_CON0 (0x001c) */
 #define AFE_DAIBT_CON0_DAIBT_EN		(0x1 << 0)
 #define AFE_DAIBT_CON0_BT_FUNC_EN	(0x1 << 1)
@@ -137,22 +114,8 @@
 #define AFE_MRGIF_CON_I2S_MODE_MASK	(0xf << 20)
 #define AFE_MRGIF_CON_I2S_MODE_32K	(0x4 << 20)
 
-/* ASYS_I2SO1_CON (0x061c) */
-#define ASYS_I2SO1_CON_FS		(0x1f << 8)
-#define ASYS_I2SO1_CON_FS_SET(x)	((x) << 8)
-#define ASYS_I2SO1_CON_MULTI_CH		(0x1 << 16)
-#define ASYS_I2SO1_CON_SIDEGEN		(0x1 << 30)
-#define ASYS_I2SO1_CON_I2S_EN		(0x1 << 0)
-/* 0:EIAJ 1:I2S */
-#define ASYS_I2SO1_CON_I2S_MODE		(0x1 << 3)
-#define ASYS_I2SO1_CON_WIDE_MODE	(0x1 << 1)
-#define ASYS_I2SO1_CON_WIDE_MODE_SET(x)	((x) << 1)
-
-/* PWR2_TOP_CON (0x0634) */
-#define PWR2_TOP_CON_INIT_VAL		(0xffe1ffff)
-
-/* ASYS_IRQ_CLR (0x07c0) */
-#define ASYS_IRQ_CLR_ALL		(0xffffffff)
+/* ASYS_TOP_CON (0x0600) */
+#define ASYS_TOP_CON_ASYS_TIMING_ON		(0x3 << 0)
 
 /* PWR2_ASM_CON1 (0x1070) */
 #define PWR2_ASM_CON1_INIT_VAL		(0x492492)
@@ -182,5 +145,4 @@
 #define ASYS_I2S_CON_WIDE_MODE_SET(x)	((x) << 1)
 #define ASYS_I2S_IN_PHASE_FIX		(0x1 << 31)
 
-#define AFE_END_ADDR 0x15e0
 #endif
diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
index 8a643a35d3d4c8c2ae0c418231ee85772d0e995c..c7f7f8add5d9dcd5d6437e245d7ba7516ccbfbf2 100644
--- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
@@ -1083,7 +1083,7 @@ static int mt8173_afe_init_audio_clk(struct mtk_base_afe *afe)
 static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
 {
 	int ret, i;
-	unsigned int irq_id;
+	int irq_id;
 	struct mtk_base_afe *afe;
 	struct mt8173_afe_private *afe_priv;
 	struct resource *res;
@@ -1105,9 +1105,9 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
 	afe->dev = &pdev->dev;
 
 	irq_id = platform_get_irq(pdev, 0);
-	if (!irq_id) {
+	if (irq_id <= 0) {
 		dev_err(afe->dev, "np %s no irq\n", afe->dev->of_node->name);
-		return -ENXIO;
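+		/* irq_id <= 0 means failure: pass real errnos through, map 0 to -ENXIO */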
+		return irq_id < 0 ? irq_id : -ENXIO;
 	}
 	ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler,
 			       0, "Afe_ISR_Handle", (void *)afe);
diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c
index b6615affe5718b379df657f401cf34eeddc08dd1..81b09d740ed96ee39eeed88705305d46051c45da 100644
--- a/sound/soc/nuc900/nuc900-ac97.c
+++ b/sound/soc/nuc900/nuc900-ac97.c
@@ -67,7 +67,7 @@ static unsigned short nuc900_ac97_read(struct snd_ac97 *ac97,
 
 	/* polling the AC_R_FINISH */
 	while (!(AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_R_FINISH)
-								&& timeout--)
+								&& --timeout)
 		mdelay(1);
 
 	if (!timeout) {
@@ -121,7 +121,7 @@ static void nuc900_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
 
 	/* polling the AC_W_FINISH */
 	while ((AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_W_FINISH)
-								&& timeout--)
+								&& --timeout)
 		mdelay(1);
 
 	if (!timeout)
@@ -345,11 +345,10 @@ static int nuc900_ac97_drvprobe(struct platform_device *pdev)
 		goto out;
 	}
 
-	nuc900_audio->irq_num = platform_get_irq(pdev, 0);
-	if (!nuc900_audio->irq_num) {
-		ret = -EBUSY;
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0)
 		goto out;
-	}
+	nuc900_audio->irq_num = ret;
 
 	nuc900_ac97_data = nuc900_audio;
 
diff --git a/sound/soc/omap/ams-delta.c b/sound/soc/omap/ams-delta.c
index d40219678700ed21fc6f45b0ef54f224ddeaa9da..cb72c1e57da044941198a83e78aa1c2435cae508 100644
--- a/sound/soc/omap/ams-delta.c
+++ b/sound/soc/omap/ams-delta.c
@@ -105,7 +105,7 @@ static int ams_delta_set_audio_mode(struct snd_kcontrol *kcontrol,
 	int pin, changed = 0;
 
 	/* Refuse any mode changes if we are not able to control the codec. */
-	if (!cx20442_codec->hw_write)
+	if (!cx20442_codec->component.card->pop_time)
 		return -EUNATCH;
 
 	if (ucontrol->value.enumerated.item[0] >= control->items)
@@ -345,7 +345,7 @@ static void cx81801_receive(struct tty_struct *tty,
 	if (!codec)
 		return;
 
-	if (!codec->hw_write) {
+	if (!codec->component.card->pop_time) {
 		/* First modem response, complete setup procedure */
 
 		/* Initialize timer used for config pulse generation */
diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c
index d49adc822a110ed2a9b61f1916570ed70065a3fd..704428735e3c978688505a786e5053b021a8d27b 100644
--- a/sound/soc/qcom/apq8016_sbc.c
+++ b/sound/soc/qcom/apq8016_sbc.c
@@ -43,7 +43,7 @@ struct apq8016_sbc_data {
 static int apq8016_sbc_dai_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	struct snd_soc_codec *codec;
+	struct snd_soc_component *component;
 	struct snd_soc_dai_link *dai_link = rtd->dai_link;
 	struct snd_soc_card *card = rtd->card;
 	struct apq8016_sbc_data *pdata = snd_soc_card_get_drvdata(card);
@@ -92,7 +92,7 @@ static int apq8016_sbc_dai_init(struct snd_soc_pcm_runtime *rtd)
 
 		jack = pdata->jack.jack;
 
-		snd_jack_set_key(jack, SND_JACK_BTN_0, KEY_MEDIA);
+		snd_jack_set_key(jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
 		snd_jack_set_key(jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
 		snd_jack_set_key(jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
 		snd_jack_set_key(jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
@@ -102,15 +102,15 @@ static int apq8016_sbc_dai_init(struct snd_soc_pcm_runtime *rtd)
 	for (i = 0 ; i < dai_link->num_codecs; i++) {
 		struct snd_soc_dai *dai = rtd->codec_dais[i];
 
-		codec = dai->codec;
+		component = dai->component;
 		/* Set default mclk for internal codec */
-		rval = snd_soc_codec_set_sysclk(codec, 0, 0, DEFAULT_MCLK_RATE,
+		rval = snd_soc_component_set_sysclk(component, 0, 0, DEFAULT_MCLK_RATE,
 				       SND_SOC_CLOCK_IN);
 		if (rval != 0 && rval != -ENOTSUPP) {
 			dev_warn(card->dev, "Failed to set mclk: %d\n", rval);
 			return rval;
 		}
-		rval = snd_soc_codec_set_jack(codec, &pdata->jack, NULL);
+		rval = snd_soc_component_set_jack(component, &pdata->jack, NULL);
 		if (rval != 0 && rval != -ENOTSUPP) {
 			dev_warn(card->dev, "Failed to set jack: %d\n", rval);
 			return rval;
diff --git a/sound/soc/rockchip/rk3399_gru_sound.c b/sound/soc/rockchip/rk3399_gru_sound.c
index d64fbbd505448b4c90a04a40d5ebc90dd83fd42b..fa6cd1de828be95991dee4eaea9dec40f69d9fef 100644
--- a/sound/soc/rockchip/rk3399_gru_sound.c
+++ b/sound/soc/rockchip/rk3399_gru_sound.c
@@ -206,7 +206,8 @@ static int rockchip_sound_da7219_init(struct snd_soc_pcm_runtime *rtd)
 		return ret;
 	}
 
-	snd_jack_set_key(rockchip_sound_jack.jack, SND_JACK_BTN_0, KEY_MEDIA);
+	snd_jack_set_key(
+		rockchip_sound_jack.jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
 	snd_jack_set_key(
 		rockchip_sound_jack.jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
 	snd_jack_set_key(
diff --git a/sound/soc/samsung/bells.c b/sound/soc/samsung/bells.c
index 34deba461ae18b7218b55fb78709da5886ea4974..0e66cd8ef2f908261a34c992f5eca7143aac9039 100644
--- a/sound/soc/samsung/bells.c
+++ b/sound/soc/samsung/bells.c
@@ -60,13 +60,13 @@ static int bells_set_bias_level(struct snd_soc_card *card,
 {
 	struct snd_soc_pcm_runtime *rtd;
 	struct snd_soc_dai *codec_dai;
-	struct snd_soc_codec *codec;
+	struct snd_soc_component *component;
 	struct bells_drvdata *bells = card->drvdata;
 	int ret;
 
 	rtd = snd_soc_get_pcm_runtime(card, card->dai_link[DAI_DSP_CODEC].name);
 	codec_dai = rtd->codec_dai;
-	codec = codec_dai->codec;
+	component = codec_dai->component;
 
 	if (dapm->dev != codec_dai->dev)
 		return 0;
@@ -76,7 +76,7 @@ static int bells_set_bias_level(struct snd_soc_card *card,
 		if (dapm->bias_level != SND_SOC_BIAS_STANDBY)
 			break;
 
-		ret = snd_soc_codec_set_pll(codec, WM5102_FLL1,
+		ret = snd_soc_component_set_pll(component, WM5102_FLL1,
 					    ARIZONA_FLL_SRC_MCLK1,
 					    MCLK_RATE,
 					    bells->sysclk_rate);
@@ -84,7 +84,7 @@ static int bells_set_bias_level(struct snd_soc_card *card,
 			pr_err("Failed to start FLL: %d\n", ret);
 
 		if (bells->asyncclk_rate) {
-			ret = snd_soc_codec_set_pll(codec, WM5102_FLL2,
+			ret = snd_soc_component_set_pll(component, WM5102_FLL2,
 						    ARIZONA_FLL_SRC_AIF2BCLK,
 						    BCLK2_RATE,
 						    bells->asyncclk_rate);
@@ -106,27 +106,27 @@ static int bells_set_bias_level_post(struct snd_soc_card *card,
 {
 	struct snd_soc_pcm_runtime *rtd;
 	struct snd_soc_dai *codec_dai;
-	struct snd_soc_codec *codec;
+	struct snd_soc_component *component;
 	struct bells_drvdata *bells = card->drvdata;
 	int ret;
 
 	rtd = snd_soc_get_pcm_runtime(card, card->dai_link[DAI_DSP_CODEC].name);
 	codec_dai = rtd->codec_dai;
-	codec = codec_dai->codec;
+	component = codec_dai->component;
 
 	if (dapm->dev != codec_dai->dev)
 		return 0;
 
 	switch (level) {
 	case SND_SOC_BIAS_STANDBY:
-		ret = snd_soc_codec_set_pll(codec, WM5102_FLL1, 0, 0, 0);
+		ret = snd_soc_component_set_pll(component, WM5102_FLL1, 0, 0, 0);
 		if (ret < 0) {
 			pr_err("Failed to stop FLL: %d\n", ret);
 			return ret;
 		}
 
 		if (bells->asyncclk_rate) {
-			ret = snd_soc_codec_set_pll(codec, WM5102_FLL2,
+			ret = snd_soc_component_set_pll(component, WM5102_FLL2,
 						    0, 0, 0);
 			if (ret < 0) {
 				pr_err("Failed to stop FLL: %d\n", ret);
@@ -148,8 +148,8 @@ static int bells_late_probe(struct snd_soc_card *card)
 {
 	struct bells_drvdata *bells = card->drvdata;
 	struct snd_soc_pcm_runtime *rtd;
-	struct snd_soc_codec *wm0010;
-	struct snd_soc_codec *codec;
+	struct snd_soc_component *wm0010;
+	struct snd_soc_component *component;
 	struct snd_soc_dai *aif1_dai;
 	struct snd_soc_dai *aif2_dai;
 	struct snd_soc_dai *aif3_dai;
@@ -157,22 +157,22 @@ static int bells_late_probe(struct snd_soc_card *card)
 	int ret;
 
 	rtd = snd_soc_get_pcm_runtime(card, card->dai_link[DAI_AP_DSP].name);
-	wm0010 = rtd->codec;
+	wm0010 = rtd->codec_dai->component;
 
 	rtd = snd_soc_get_pcm_runtime(card, card->dai_link[DAI_DSP_CODEC].name);
-	codec = rtd->codec;
+	component = rtd->codec_dai->component;
 	aif1_dai = rtd->codec_dai;
 
-	ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_SYSCLK,
+	ret = snd_soc_component_set_sysclk(component, ARIZONA_CLK_SYSCLK,
 				       ARIZONA_CLK_SRC_FLL1,
 				       bells->sysclk_rate,
 				       SND_SOC_CLOCK_IN);
 	if (ret != 0) {
-		dev_err(codec->dev, "Failed to set SYSCLK: %d\n", ret);
+		dev_err(component->dev, "Failed to set SYSCLK: %d\n", ret);
 		return ret;
 	}
 
-	ret = snd_soc_codec_set_sysclk(wm0010, 0, 0, SYS_MCLK_RATE, 0);
+	ret = snd_soc_component_set_sysclk(wm0010, 0, 0, SYS_MCLK_RATE, 0);
 	if (ret != 0) {
 		dev_err(wm0010->dev, "Failed to set WM0010 clock: %d\n", ret);
 		return ret;
@@ -182,20 +182,20 @@ static int bells_late_probe(struct snd_soc_card *card)
 	if (ret != 0)
 		dev_err(aif1_dai->dev, "Failed to set AIF1 clock: %d\n", ret);
 
-	ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_OPCLK, 0,
+	ret = snd_soc_component_set_sysclk(component, ARIZONA_CLK_OPCLK, 0,
 				       SYS_MCLK_RATE, SND_SOC_CLOCK_OUT);
 	if (ret != 0)
-		dev_err(codec->dev, "Failed to set OPCLK: %d\n", ret);
+		dev_err(component->dev, "Failed to set OPCLK: %d\n", ret);
 
 	if (card->num_rtd == DAI_CODEC_CP)
 		return 0;
 
-	ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_ASYNCCLK,
+	ret = snd_soc_component_set_sysclk(component, ARIZONA_CLK_ASYNCCLK,
 				       ARIZONA_CLK_SRC_FLL2,
 				       bells->asyncclk_rate,
 				       SND_SOC_CLOCK_IN);
 	if (ret != 0) {
-		dev_err(codec->dev, "Failed to set ASYNCCLK: %d\n", ret);
+		dev_err(component->dev, "Failed to set ASYNCCLK: %d\n", ret);
 		return ret;
 	}
 
@@ -221,7 +221,7 @@ static int bells_late_probe(struct snd_soc_card *card)
 		return ret;
 	}
 
-	ret = snd_soc_codec_set_sysclk(wm9081_dai->codec, WM9081_SYSCLK_MCLK,
+	ret = snd_soc_component_set_sysclk(wm9081_dai->component, WM9081_SYSCLK_MCLK,
 				       0, SYS_MCLK_RATE, 0);
 	if (ret != 0) {
 		dev_err(wm9081_dai->dev, "Failed to set MCLK: %d\n", ret);
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index f12a88a21dfa24d32f04dad32635dee2482a9337..64d5ecb865283f73c344494d0595abd25d200d1c 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -197,16 +197,27 @@ int rsnd_io_is_working(struct rsnd_dai_stream *io)
 	return 0;
 }
 
-int rsnd_runtime_channel_original(struct rsnd_dai_stream *io)
+int rsnd_runtime_channel_original_with_params(struct rsnd_dai_stream *io,
+					      struct snd_pcm_hw_params *params)
 {
 	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
 
-	return runtime->channels;
+	/*
+	 * params is non-NULL when called from the hw constraint refine callbacks;
+	 * see
+	 *	__rsnd_soc_hw_rule_rate()
+	 *	__rsnd_soc_hw_rule_channels()
+	 */
+	if (params)
+		return params_channels(params);
+	else
+		return runtime->channels;
 }
 
-int rsnd_runtime_channel_after_ctu(struct rsnd_dai_stream *io)
+int rsnd_runtime_channel_after_ctu_with_params(struct rsnd_dai_stream *io,
+					       struct snd_pcm_hw_params *params)
 {
-	int chan = rsnd_runtime_channel_original(io);
+	int chan = rsnd_runtime_channel_original_with_params(io, params);
 	struct rsnd_mod *ctu_mod = rsnd_io_to_mod_ctu(io);
 
 	if (ctu_mod) {
@@ -219,12 +230,13 @@ int rsnd_runtime_channel_after_ctu(struct rsnd_dai_stream *io)
 	return chan;
 }
 
-int rsnd_runtime_channel_for_ssi(struct rsnd_dai_stream *io)
+int rsnd_runtime_channel_for_ssi_with_params(struct rsnd_dai_stream *io,
+					     struct snd_pcm_hw_params *params)
 {
 	struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
 	int chan = rsnd_io_is_play(io) ?
-		rsnd_runtime_channel_after_ctu(io) :
-		rsnd_runtime_channel_original(io);
+		rsnd_runtime_channel_after_ctu_with_params(io, params) :
+		rsnd_runtime_channel_original_with_params(io, params);
 
 	/* Use Multi SSI */
 	if (rsnd_runtime_is_ssi_multi(io))
@@ -262,10 +274,10 @@ u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
 	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
 	struct device *dev = rsnd_priv_to_dev(priv);
 
-	switch (runtime->sample_bits) {
+	switch (snd_pcm_format_width(runtime->format)) {
 	case 16:
 		return 8 << 16;
-	case 32:
+	case 24:
 		return 0 << 16;
 	}
 
@@ -282,11 +294,12 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
 	struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);
 	struct rsnd_mod *target;
 	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
-	u32 val = 0x76543210;
-	u32 mask = ~0;
 
 	/*
-	 * *Hardware* L/R and *Software* L/R are inverted.
+	 * *Hardware* L/R and *Software* L/R are inverted for 16bit data.
+	 *	    31..16 15...0
+	 *	HW: [L ch] [R ch]
+	 *	SW: [R ch] [L ch]
 	 * We need to care about inversion timing to control
 	 * Playback/Capture correctly.
 	 * The point is [DVC] needs *Hardware* L/R, [MEM] needs *Software* L/R
@@ -313,27 +326,13 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
 		target = cmd ? cmd : ssiu;
 	}
 
-	mask <<= runtime->channels * 4;
-	val = val & mask;
-
-	switch (runtime->sample_bits) {
-	case 16:
-		val |= 0x67452301 & ~mask;
-		break;
-	case 32:
-		val |= 0x76543210 & ~mask;
-		break;
-	}
-
-	/*
-	 * exchange channeles on SRC if possible,
-	 * otherwise, R/L volume settings on DVC
-	 * changes inverted channels
-	 */
-	if (mod == target)
-		return val;
-	else
+	/* Non target mod or 24bit data needs normal DALIGN */
+	if ((snd_pcm_format_width(runtime->format) != 16) ||
+	    (mod != target))
 		return 0x76543210;
+	/* Target mod needs inverted DALIGN when 16bit */
+	else
+		return 0x67452301;
 }
 
 u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
@@ -363,12 +362,8 @@ u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
 	 * HW    24bit data is located as 0x******00
 	 *
 	 */
-	switch (runtime->sample_bits) {
-	case 16:
+	if (snd_pcm_format_width(runtime->format) == 16)
 		return 0;
-	case 32:
-		break;
-	}
 
 	for (i = 0; i < ARRAY_SIZE(playback_mods); i++) {
 		tmod = rsnd_io_to_mod(io, mods[i]);
@@ -616,8 +611,6 @@ static int rsnd_soc_dai_trigger(struct snd_pcm_substream *substream, int cmd,
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
 	case SNDRV_PCM_TRIGGER_RESUME:
-		rsnd_dai_stream_init(io, substream);
-
 		ret = rsnd_dai_call(init, io, priv);
 		if (ret < 0)
 			goto dai_trigger_end;
@@ -639,7 +632,6 @@ static int rsnd_soc_dai_trigger(struct snd_pcm_substream *substream, int cmd,
 
 		ret |= rsnd_dai_call(quit, io, priv);
 
-		rsnd_dai_stream_quit(io);
 		break;
 	default:
 		ret = -EINVAL;
@@ -784,8 +776,9 @@ static int rsnd_soc_hw_rule(struct rsnd_priv *priv,
 	return snd_interval_refine(iv, &p);
 }
 
-static int rsnd_soc_hw_rule_rate(struct snd_pcm_hw_params *params,
-				 struct snd_pcm_hw_rule *rule)
+static int __rsnd_soc_hw_rule_rate(struct snd_pcm_hw_params *params,
+				   struct snd_pcm_hw_rule *rule,
+				   int is_play)
 {
 	struct snd_interval *ic_ = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
 	struct snd_interval *ir = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
@@ -793,25 +786,37 @@ static int rsnd_soc_hw_rule_rate(struct snd_pcm_hw_params *params,
 	struct snd_soc_dai *dai = rule->private;
 	struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
 	struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
+	struct rsnd_dai_stream *io = is_play ? &rdai->playback : &rdai->capture;
 
 	/*
 	 * possible sampling rate limitation is same as
 	 * 2ch if it supports multi ssi
+	 * and same as 8ch if TDM 6ch (see rsnd_ssi_config_init())
 	 */
 	ic = *ic_;
-	if (1 < rsnd_rdai_ssi_lane_get(rdai)) {
-		ic.min = 2;
-		ic.max = 2;
-	}
+	ic.min =
+	ic.max = rsnd_runtime_channel_for_ssi_with_params(io, params);
 
 	return rsnd_soc_hw_rule(priv, rsnd_soc_hw_rate_list,
 				ARRAY_SIZE(rsnd_soc_hw_rate_list),
 				&ic, ir);
 }
 
+static int rsnd_soc_hw_rule_rate_playback(struct snd_pcm_hw_params *params,
+				 struct snd_pcm_hw_rule *rule)
+{
+	return __rsnd_soc_hw_rule_rate(params, rule, 1);
+}
+
+static int rsnd_soc_hw_rule_rate_capture(struct snd_pcm_hw_params *params,
+					  struct snd_pcm_hw_rule *rule)
+{
+	return __rsnd_soc_hw_rule_rate(params, rule, 0);
+}
 
-static int rsnd_soc_hw_rule_channels(struct snd_pcm_hw_params *params,
-				     struct snd_pcm_hw_rule *rule)
+static int __rsnd_soc_hw_rule_channels(struct snd_pcm_hw_params *params,
+				       struct snd_pcm_hw_rule *rule,
+				       int is_play)
 {
 	struct snd_interval *ic_ = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
 	struct snd_interval *ir = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
@@ -819,22 +824,34 @@ static int rsnd_soc_hw_rule_channels(struct snd_pcm_hw_params *params,
 	struct snd_soc_dai *dai = rule->private;
 	struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
 	struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
+	struct rsnd_dai_stream *io = is_play ? &rdai->playback : &rdai->capture;
 
 	/*
 	 * possible sampling rate limitation is same as
 	 * 2ch if it supports multi ssi
+	 * and same as 8ch if TDM 6ch (see rsnd_ssi_config_init())
 	 */
 	ic = *ic_;
-	if (1 < rsnd_rdai_ssi_lane_get(rdai)) {
-		ic.min = 2;
-		ic.max = 2;
-	}
+	ic.min =
+	ic.max = rsnd_runtime_channel_for_ssi_with_params(io, params);
 
 	return rsnd_soc_hw_rule(priv, rsnd_soc_hw_channels_list,
 				ARRAY_SIZE(rsnd_soc_hw_channels_list),
 				ir, &ic);
 }
 
+static int rsnd_soc_hw_rule_channels_playback(struct snd_pcm_hw_params *params,
+					      struct snd_pcm_hw_rule *rule)
+{
+	return __rsnd_soc_hw_rule_channels(params, rule, 1);
+}
+
+static int rsnd_soc_hw_rule_channels_capture(struct snd_pcm_hw_params *params,
+					     struct snd_pcm_hw_rule *rule)
+{
+	return __rsnd_soc_hw_rule_channels(params, rule, 0);
+}
+
 static const struct snd_pcm_hardware rsnd_pcm_hardware = {
 	.info =		SNDRV_PCM_INFO_INTERLEAVED	|
 			SNDRV_PCM_INFO_MMAP		|
@@ -859,6 +876,8 @@ static int rsnd_soc_dai_startup(struct snd_pcm_substream *substream,
 	int ret;
 	int i;
 
+	rsnd_dai_stream_init(io, substream);
+
 	/*
 	 * Channel Limitation
 	 * It depends on Platform design
@@ -886,11 +905,17 @@ static int rsnd_soc_dai_startup(struct snd_pcm_substream *substream,
 	 * It depends on Clock Master Mode
 	 */
 	if (rsnd_rdai_is_clk_master(rdai)) {
+		int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+
 		snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
-				    rsnd_soc_hw_rule_rate, dai,
+				    is_play ? rsnd_soc_hw_rule_rate_playback :
+					      rsnd_soc_hw_rule_rate_capture,
+				    dai,
 				    SNDRV_PCM_HW_PARAM_CHANNELS, -1);
 		snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
-				    rsnd_soc_hw_rule_channels, dai,
+				    is_play ? rsnd_soc_hw_rule_channels_playback :
+					      rsnd_soc_hw_rule_channels_capture,
+				    dai,
 				    SNDRV_PCM_HW_PARAM_RATE, -1);
 	}
 
@@ -915,6 +940,8 @@ static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream,
 	 * call rsnd_dai_call without spinlock
 	 */
 	rsnd_dai_call(nolock_stop, io, priv);
+
+	rsnd_dai_stream_quit(io);
 }
 
 static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
@@ -990,7 +1017,7 @@ of_node_compatible:
 
 static void __rsnd_dai_probe(struct rsnd_priv *priv,
 			     struct device_node *dai_np,
-			     int dai_i, int is_graph)
+			     int dai_i)
 {
 	struct device_node *playback, *capture;
 	struct rsnd_dai_stream *io_playback;
@@ -1089,13 +1116,13 @@ static int rsnd_dai_probe(struct rsnd_priv *priv)
 	dai_i = 0;
 	if (is_graph) {
 		for_each_endpoint_of_node(dai_node, dai_np) {
-			__rsnd_dai_probe(priv, dai_np, dai_i, is_graph);
+			__rsnd_dai_probe(priv, dai_np, dai_i);
 			rsnd_ssi_parse_hdmi_connection(priv, dai_np, dai_i);
 			dai_i++;
 		}
 	} else {
 		for_each_child_of_node(dai_node, dai_np)
-			__rsnd_dai_probe(priv, dai_np, dai_i++, is_graph);
+			__rsnd_dai_probe(priv, dai_np, dai_i++);
 	}
 
 	return 0;
@@ -1496,6 +1523,8 @@ static int rsnd_remove(struct platform_device *pdev)
 	};
 	int ret = 0, i;
 
+	snd_soc_disconnect_sync(&pdev->dev);
+
 	pm_runtime_disable(&pdev->dev);
 
 	for_each_rsnd_dai(rdai, priv, i) {
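
The rsnd_get_dalign() rework above collapses the per-channel mask arithmetic
into a two-way decision: only the swap-target module gets the inverted byte
lanes, and only for 16bit streams; everything else keeps the natural
0x76543210 order. A minimal userspace sketch of that decision table (the two
register values are taken from the hunk; the helper name is made up for
illustration):

#include <stdio.h>

/*
 * Models the simplified DALIGN selection in rsnd_get_dalign(): the
 * inverted pattern is used only when the module is the swap target
 * *and* the stream carries 16bit samples.
 */
static unsigned int pick_dalign(int is_target, int sample_width)
{
	if (sample_width != 16 || !is_target)
		return 0x76543210;	/* normal lane order */
	return 0x67452301;		/* L/R exchanged per 16bit pair */
}

int main(void)
{
	printf("target, 16bit: 0x%08x\n", pick_dalign(1, 16));
	printf("target, 24bit: 0x%08x\n", pick_dalign(1, 24));
	printf("other,  16bit: 0x%08x\n", pick_dalign(0, 16));
	return 0;
}
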
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 4d750bdf8e2449981537a4b6d41bea33af321789..41de23417c4a11199140f32c8b7804c51e606a24 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -71,25 +71,7 @@ static struct rsnd_mod mem = {
 static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
 				  struct rsnd_dai_stream *io)
 {
-	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
-	bool elapsed = false;
-	unsigned long flags;
-
-	/*
-	 * Renesas sound Gen1 needs 1 DMAC,
-	 * Gen2 needs 2 DMAC.
-	 * In Gen2 case, it are Audio-DMAC, and Audio-DMAC-peri-peri.
-	 * But, Audio-DMAC-peri-peri doesn't have interrupt,
-	 * and this driver is assuming that here.
-	 */
-	spin_lock_irqsave(&priv->lock, flags);
-
 	if (rsnd_io_is_working(io))
-		elapsed = true;
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	if (elapsed)
 		rsnd_dai_period_elapsed(io);
 }
 
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 57cd2bc773c2ab419e33a814d07b815a42af99cb..ad6523595b0acdd0f3c3a7932b5ae535e9eb4cab 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -399,9 +399,18 @@ void rsnd_parse_connect_common(struct rsnd_dai *rdai,
 		struct device_node *playback,
 		struct device_node *capture);
 
-int rsnd_runtime_channel_original(struct rsnd_dai_stream *io);
-int rsnd_runtime_channel_after_ctu(struct rsnd_dai_stream *io);
-int rsnd_runtime_channel_for_ssi(struct rsnd_dai_stream *io);
+#define rsnd_runtime_channel_original(io) \
+	rsnd_runtime_channel_original_with_params(io, NULL)
+int rsnd_runtime_channel_original_with_params(struct rsnd_dai_stream *io,
+				struct snd_pcm_hw_params *params);
+#define rsnd_runtime_channel_after_ctu(io)			\
+	rsnd_runtime_channel_after_ctu_with_params(io, NULL)
+int rsnd_runtime_channel_after_ctu_with_params(struct rsnd_dai_stream *io,
+				struct snd_pcm_hw_params *params);
+#define rsnd_runtime_channel_for_ssi(io) \
+	rsnd_runtime_channel_for_ssi_with_params(io, NULL)
+int rsnd_runtime_channel_for_ssi_with_params(struct rsnd_dai_stream *io,
+				 struct snd_pcm_hw_params *params);
 int rsnd_runtime_is_ssi_multi(struct rsnd_dai_stream *io);
 int rsnd_runtime_is_ssi_tdm(struct rsnd_dai_stream *io);
 
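
The header change keeps the old channel helpers as thin macros over new
*_with_params() variants, so existing callers are untouched while the hw_rule
paths can evaluate a candidate snd_pcm_hw_params before any runtime exists.
The same wrapper pattern, reduced to a standalone sketch (the names here are
illustrative, not the driver's):

#include <stdio.h>
#include <stddef.h>

struct params { int channels; };
struct stream { int channels; };

/* New variant: prefer the candidate hw_params when they are given. */
static int channel_with_params(const struct stream *io,
			       const struct params *params)
{
	return params ? params->channels : io->channels;
}

/* Old entry point preserved as a macro, exactly like rsnd.h does. */
#define channel(io)	channel_with_params(io, NULL)

int main(void)
{
	struct stream io = { .channels = 2 };
	struct params p = { .channels = 6 };

	printf("runtime path: %d\n", channel(&io));                  /* 2 */
	printf("hw_rule path: %d\n", channel_with_params(&io, &p));  /* 6 */
	return 0;
}
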
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index cbf3bf312d23bda9427747c1af2dde30d9fcf43d..97a9db892a8f06461be29eacceae01e8c40f8da9 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -79,8 +79,8 @@ struct rsnd_ssi {
 	int irq;
 	unsigned int usrcnt;
 
+	/* for PIO */
 	int byte_pos;
-	int period_pos;
 	int byte_per_period;
 	int next_period_byte;
 };
@@ -371,11 +371,11 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
 	if (rsnd_io_is_play(io))
 		cr_own |= TRMD;
 
-	switch (runtime->sample_bits) {
+	switch (snd_pcm_format_width(runtime->format)) {
 	case 16:
 		cr_own |= DWL_16;
 		break;
-	case 32:
+	case 24:
 		cr_own |= DWL_24;
 		break;
 	}
@@ -414,63 +414,6 @@ static void rsnd_ssi_register_setup(struct rsnd_mod *mod)
 					ssi->cr_en);
 }
 
-static void rsnd_ssi_pointer_init(struct rsnd_mod *mod,
-				  struct rsnd_dai_stream *io)
-{
-	struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
-	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
-
-	ssi->byte_pos		= 0;
-	ssi->period_pos		= 0;
-	ssi->byte_per_period	= runtime->period_size *
-				  runtime->channels *
-				  samples_to_bytes(runtime, 1);
-	ssi->next_period_byte	= ssi->byte_per_period;
-}
-
-static int rsnd_ssi_pointer_offset(struct rsnd_mod *mod,
-				   struct rsnd_dai_stream *io,
-				   int additional)
-{
-	struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
-	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
-	int pos = ssi->byte_pos + additional;
-
-	pos %= (runtime->periods * ssi->byte_per_period);
-
-	return pos;
-}
-
-static bool rsnd_ssi_pointer_update(struct rsnd_mod *mod,
-				    struct rsnd_dai_stream *io,
-				    int byte)
-{
-	struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
-	bool ret = false;
-	int byte_pos;
-
-	byte_pos = ssi->byte_pos + byte;
-
-	if (byte_pos >= ssi->next_period_byte) {
-		struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
-
-		ssi->period_pos++;
-		ssi->next_period_byte += ssi->byte_per_period;
-
-		if (ssi->period_pos >= runtime->periods) {
-			byte_pos = 0;
-			ssi->period_pos = 0;
-			ssi->next_period_byte = ssi->byte_per_period;
-		}
-
-		ret = true;
-	}
-
-	WRITE_ONCE(ssi->byte_pos, byte_pos);
-
-	return ret;
-}
-
 /*
  *	SSI mod common functions
  */
@@ -484,8 +427,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
 	if (!rsnd_ssi_is_run_mods(mod, io))
 		return 0;
 
-	rsnd_ssi_pointer_init(mod, io);
-
 	ssi->usrcnt++;
 
 	rsnd_mod_power_on(mod);
@@ -656,6 +597,8 @@ static int rsnd_ssi_irq(struct rsnd_mod *mod,
 	return 0;
 }
 
+static bool rsnd_ssi_pio_interrupt(struct rsnd_mod *mod,
+				   struct rsnd_dai_stream *io);
 static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
 				 struct rsnd_dai_stream *io)
 {
@@ -674,30 +617,8 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
 	status = rsnd_ssi_status_get(mod);
 
 	/* PIO only */
-	if (!is_dma && (status & DIRQ)) {
-		struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
-		u32 *buf = (u32 *)(runtime->dma_area +
-				   rsnd_ssi_pointer_offset(mod, io, 0));
-		int shift = 0;
-
-		switch (runtime->sample_bits) {
-		case 32:
-			shift = 8;
-			break;
-		}
-
-		/*
-		 * 8/16/32 data can be assesse to TDR/RDR register
-		 * directly as 32bit data
-		 * see rsnd_ssi_init()
-		 */
-		if (rsnd_io_is_play(io))
-			rsnd_mod_write(mod, SSITDR, (*buf) << shift);
-		else
-			*buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
-
-		elapsed = rsnd_ssi_pointer_update(mod, io, sizeof(*buf));
-	}
+	if (!is_dma && (status & DIRQ))
+		elapsed = rsnd_ssi_pio_interrupt(mod, io);
 
 	/* DMA only */
 	if (is_dma && (status & (UIRQ | OIRQ)))
@@ -835,7 +756,71 @@ static int rsnd_ssi_common_remove(struct rsnd_mod *mod,
 	return 0;
 }
 
-static int rsnd_ssi_pointer(struct rsnd_mod *mod,
+/*
+ *	SSI PIO functions
+ */
+static bool rsnd_ssi_pio_interrupt(struct rsnd_mod *mod,
+				   struct rsnd_dai_stream *io)
+{
+	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+	struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+	u32 *buf = (u32 *)(runtime->dma_area + ssi->byte_pos);
+	int shift = 0;
+	int byte_pos;
+	bool elapsed = false;
+
+	if (snd_pcm_format_width(runtime->format) == 24)
+		shift = 8;
+
+	/*
+	 * 8/16/32 bit data can be accessed through the TDR/RDR register
+	 * directly as 32bit data
+	 * see rsnd_ssi_init()
+	 */
+	if (rsnd_io_is_play(io))
+		rsnd_mod_write(mod, SSITDR, (*buf) << shift);
+	else
+		*buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
+
+	byte_pos = ssi->byte_pos + sizeof(*buf);
+
+	if (byte_pos >= ssi->next_period_byte) {
+		int period_pos = byte_pos / ssi->byte_per_period;
+
+		if (period_pos >= runtime->periods) {
+			byte_pos = 0;
+			period_pos = 0;
+		}
+
+		ssi->next_period_byte = (period_pos + 1) * ssi->byte_per_period;
+
+		elapsed = true;
+	}
+
+	WRITE_ONCE(ssi->byte_pos, byte_pos);
+
+	return elapsed;
+}
+
+static int rsnd_ssi_pio_init(struct rsnd_mod *mod,
+			     struct rsnd_dai_stream *io,
+			     struct rsnd_priv *priv)
+{
+	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+	struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+
+	if (!rsnd_ssi_is_parent(mod, io)) {
+		ssi->byte_pos		= 0;
+		ssi->byte_per_period	= runtime->period_size *
+					  runtime->channels *
+					  samples_to_bytes(runtime, 1);
+		ssi->next_period_byte	= ssi->byte_per_period;
+	}
+
+	return rsnd_ssi_init(mod, io, priv);
+}
+
+static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod,
 			    struct rsnd_dai_stream *io,
 			    snd_pcm_uframes_t *pointer)
 {
@@ -851,12 +836,12 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
 	.name	= SSI_NAME,
 	.probe	= rsnd_ssi_common_probe,
 	.remove	= rsnd_ssi_common_remove,
-	.init	= rsnd_ssi_init,
+	.init	= rsnd_ssi_pio_init,
 	.quit	= rsnd_ssi_quit,
 	.start	= rsnd_ssi_start,
 	.stop	= rsnd_ssi_stop,
 	.irq	= rsnd_ssi_irq,
-	.pointer= rsnd_ssi_pointer,
+	.pointer = rsnd_ssi_pio_pointer,
 	.pcm_new = rsnd_ssi_pcm_new,
 	.hw_params = rsnd_ssi_hw_params,
 };
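
The PIO path above no longer keeps a separate period_pos field:
rsnd_ssi_pio_interrupt() recomputes the period index from byte_pos whenever a
period boundary is crossed, and wraps both back to zero at the end of the
buffer. A small userspace model of that bookkeeping (the buffer geometry is an
arbitrary example, two periods of 8 bytes):

#include <stdio.h>
#include <stdbool.h>

struct pio_pos {
	int byte_pos;
	int byte_per_period;
	int next_period_byte;
};

/*
 * Mirrors the wrap logic in rsnd_ssi_pio_interrupt(); returns true
 * when a period boundary was crossed, i.e. when period_elapsed is due.
 */
static bool advance(struct pio_pos *p, int bytes, int periods)
{
	int byte_pos = p->byte_pos + bytes;
	bool elapsed = false;

	if (byte_pos >= p->next_period_byte) {
		int period_pos = byte_pos / p->byte_per_period;

		if (period_pos >= periods) {
			byte_pos = 0;
			period_pos = 0;
		}
		p->next_period_byte = (period_pos + 1) * p->byte_per_period;
		elapsed = true;
	}
	p->byte_pos = byte_pos;
	return elapsed;
}

int main(void)
{
	struct pio_pos p = { 0, 8, 8 };
	int i;

	for (i = 0; i < 6; i++) {
		bool elapsed = advance(&p, 4, 2);

		printf("byte_pos=%2d elapsed=%d\n", p.byte_pos, elapsed);
	}
	return 0;
}
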
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index d9b1e6417fb947652f9186ea13c1845ca5882b85..81232f4ab614b3089fe1af42a5612f3a1cbd8fea 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -1096,7 +1096,6 @@ static struct snd_compr_ops soc_compr_dyn_ops = {
  */
 int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
 {
-	struct snd_soc_codec *codec = rtd->codec;
 	struct snd_soc_platform *platform = rtd->platform;
 	struct snd_soc_component *component;
 	struct snd_soc_rtdcom_list *rtdcom;
@@ -1199,8 +1198,9 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
 	ret = snd_compress_new(rtd->card->snd_card, num, direction,
 				new_name, compr);
 	if (ret < 0) {
+		component = rtd->codec_dai->component;
 		pr_err("compress asoc: can't create compress for codec %s\n",
-			codec->component.name);
+			component->name);
 		goto compr_err;
 	}
 
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index c0edac80df34e683291cff5fb86c560ee8e22ffd..d1f7e639d5b1ca05e1533c1cdddcc546b4723cf6 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -213,7 +213,7 @@ static umode_t soc_dev_attr_is_visible(struct kobject *kobj,
 
 	if (attr == &dev_attr_pmdown_time.attr)
 		return attr->mode; /* always visible */
-	return rtd->codec ? attr->mode : 0; /* enabled only with codec */
+	return rtd->num_codecs ? attr->mode : 0; /* enabled only with codec */
 }
 
 static const struct attribute_group soc_dapm_dev_group = {
@@ -1392,6 +1392,17 @@ static int soc_init_dai_link(struct snd_soc_card *card,
 	return 0;
 }
 
+void snd_soc_disconnect_sync(struct device *dev)
+{
+	struct snd_soc_component *component = snd_soc_lookup_component(dev, NULL);
+
+	if (!component || !component->card)
+		return;
+
+	snd_card_disconnect_sync(component->card->snd_card);
+}
+EXPORT_SYMBOL_GPL(snd_soc_disconnect_sync);
+
 /**
  * snd_soc_add_dai_link - Add a DAI link dynamically
  * @card: The ASoC card to which the DAI link is added
@@ -1945,7 +1956,9 @@ int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
 	}
 
 	/* Flip the polarity for the "CPU" end of a CODEC<->CODEC link */
-	if (cpu_dai->codec) {
+	/* the component which has non_legacy_dai_naming is Codec */
+	if (cpu_dai->codec ||
+	    cpu_dai->component->driver->non_legacy_dai_naming) {
 		unsigned int inv_dai_fmt;
 
 		inv_dai_fmt = dai_fmt & ~SND_SOC_DAIFMT_MASTER_MASK;
@@ -3149,7 +3162,7 @@ static struct snd_soc_dai *soc_add_dai(struct snd_soc_component *component,
 	if (!dai->driver->ops)
 		dai->driver->ops = &null_dai_ops;
 
-	list_add(&dai->list, &component->dai_list);
+	list_add_tail(&dai->list, &component->dai_list);
 	component->num_dai++;
 
 	dev_dbg(dev, "ASoC: Registered DAI '%s'\n", dai->name);
@@ -3176,8 +3189,6 @@ static int snd_soc_register_dais(struct snd_soc_component *component,
 
 	dev_dbg(dev, "ASoC: dai register %s #%zu\n", dev_name(dev), count);
 
-	component->dai_drv = dai_drv;
-
 	for (i = 0; i < count; i++) {
 
 		dai = soc_add_dai(component, dai_drv + i,
@@ -4354,6 +4365,7 @@ int snd_soc_get_dai_name(struct of_phandle_args *args,
 							     args,
 							     dai_name);
 		} else {
+			struct snd_soc_dai *dai;
 			int id = -1;
 
 			switch (args->args_count) {
@@ -4375,7 +4387,14 @@ int snd_soc_get_dai_name(struct of_phandle_args *args,
 
 			ret = 0;
 
-			*dai_name = pos->dai_drv[id].name;
+			/* find target DAI */
+			list_for_each_entry(dai, &pos->dai_list, list) {
+				if (id == 0)
+					break;
+				id--;
+			}
+
+			*dai_name = dai->driver->name;
 			if (!*dai_name)
 				*dai_name = pos->name;
 		}
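
snd_soc_get_dai_name() now resolves the DT cell index by walking
component->dai_list and counting the index down, which only stays correct
because soc_add_dai() switched from list_add() to list_add_tail(): head
insertion would have reversed the registration order that the index is
supposed to follow. A standalone illustration of why the insertion side
matters for a positional lookup (plain pointers here rather than the kernel
list API):

#include <stdio.h>

struct dai {
	const char *name;
	struct dai *next;
};

/* Head insertion (the old list_add() behaviour): newest entry first. */
static struct dai *add_head(struct dai *list, struct dai *d)
{
	d->next = list;
	return d;
}

/* Tail insertion (list_add_tail()): keeps registration order. */
static struct dai *add_tail(struct dai *list, struct dai *d)
{
	struct dai **p = &list;

	while (*p)
		p = &(*p)->next;
	*p = d;
	return list;
}

/* Positional lookup, like counting "id" down over the dai_list. */
static const char *nth(struct dai *list, int id)
{
	struct dai *d;

	for (d = list; d; d = d->next)
		if (id-- == 0)
			return d->name;
	return NULL;
}

int main(void)
{
	struct dai a1 = { "dai0" }, b1 = { "dai1" }, c1 = { "dai2" };
	struct dai a2 = { "dai0" }, b2 = { "dai1" }, c2 = { "dai2" };
	struct dai *head = NULL, *tail = NULL;

	head = add_head(add_head(add_head(head, &a1), &b1), &c1);
	tail = add_tail(add_tail(add_tail(tail, &a2), &b2), &c2);

	printf("head insert, index 0: %s\n", nth(head, 0));	/* dai2 */
	printf("tail insert, index 0: %s\n", nth(tail, 0));	/* dai0 */
	return 0;
}
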
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index 500f98c730b964e6aee457d91ea21e29020a5334..7144a51ddfa9aaa49c917461e1aa9293f26955a4 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -378,7 +378,7 @@ int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol,
 	unsigned int rshift = mc->rshift;
 	int max = mc->max;
 	int min = mc->min;
-	int mask = (1 << (fls(min + max) - 1)) - 1;
+	unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
 	unsigned int val;
 	int ret;
 
@@ -423,7 +423,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
 	unsigned int rshift = mc->rshift;
 	int max = mc->max;
 	int min = mc->min;
-	int mask = (1 << (fls(min + max) - 1)) - 1;
+	unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
 	int err = 0;
 	unsigned int val, val_mask, val2 = 0;
 
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index e30aacbcfc291931dfb9723bcf35f0f66a220a4b..bcd3da2739e20d5db3b9a52d185b357150369de0 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -288,7 +288,7 @@ static const struct snd_soc_platform_driver dummy_platform = {
 	.ops = &dummy_dma_ops,
 };
 
-static struct snd_soc_codec_driver dummy_codec;
+static const struct snd_soc_codec_driver dummy_codec;
 
 #define STUB_RATES	SNDRV_PCM_RATE_8000_192000
 #define STUB_FORMATS	(SNDRV_PCM_FMTBIT_S8 | \
diff --git a/sound/soc/stm/stm32_sai.c b/sound/soc/stm/stm32_sai.c
index d6f71a3406e91e5c09679d85c9f31268ada71649..d743b7dd52fb9e1fc508ceca54c9d25a67182609 100644
--- a/sound/soc/stm/stm32_sai.c
+++ b/sound/soc/stm/stm32_sai.c
@@ -28,16 +28,6 @@
 
 #include "stm32_sai.h"
 
-static LIST_HEAD(sync_providers);
-static DEFINE_MUTEX(sync_mutex);
-
-struct sync_provider {
-	struct list_head link;
-	struct device_node *node;
-	int  (*sync_conf)(void *data, int synco);
-	void *data;
-};
-
 static const struct stm32_sai_conf stm32_sai_conf_f4 = {
 	.version = SAI_STM32F4,
 };
@@ -70,9 +60,8 @@ static int stm32_sai_sync_conf_client(struct stm32_sai_data *sai, int synci)
 	return 0;
 }
 
-static int stm32_sai_sync_conf_provider(void *data, int synco)
+static int stm32_sai_sync_conf_provider(struct stm32_sai_data *sai, int synco)
 {
-	struct stm32_sai_data *sai = (struct stm32_sai_data *)data;
 	u32 prev_synco;
 	int ret;
 
@@ -103,83 +92,42 @@ static int stm32_sai_sync_conf_provider(void *data, int synco)
 	return 0;
 }
 
-static int stm32_sai_set_sync_provider(struct device_node *np, int synco)
+static int stm32_sai_set_sync(struct stm32_sai_data *sai_client,
+			      struct device_node *np_provider,
+			      int synco, int synci)
 {
-	struct sync_provider *provider;
+	struct platform_device *pdev = of_find_device_by_node(np_provider);
+	struct stm32_sai_data *sai_provider;
 	int ret;
 
-	mutex_lock(&sync_mutex);
-	list_for_each_entry(provider, &sync_providers, link) {
-		if (provider->node == np) {
-			ret = provider->sync_conf(provider->data, synco);
-			mutex_unlock(&sync_mutex);
-			return ret;
-		}
+	if (!pdev) {
+		dev_err(&sai_client->pdev->dev,
+			"Device not found for node %s\n", np_provider->name);
+		return -ENODEV;
 	}
-	mutex_unlock(&sync_mutex);
 
-	/* SAI sync provider not found */
-	return -ENODEV;
-}
-
-static int stm32_sai_set_sync(struct stm32_sai_data *sai,
-			      struct device_node *np_provider,
-			      int synco, int synci)
-{
-	int ret;
+	sai_provider = platform_get_drvdata(pdev);
+	if (!sai_provider) {
+		dev_err(&sai_client->pdev->dev,
+			"SAI sync provider data not found\n");
+		return -EINVAL;
+	}
 
 	/* Configure sync client */
-	stm32_sai_sync_conf_client(sai, synci);
+	ret = stm32_sai_sync_conf_client(sai_client, synci);
+	if (ret < 0)
+		return ret;
 
 	/* Configure sync provider */
-	ret = stm32_sai_set_sync_provider(np_provider, synco);
-
-	return ret;
-}
-
-static int stm32_sai_sync_add_provider(struct platform_device *pdev,
-				       void *data)
-{
-	struct sync_provider *sp;
-
-	sp = devm_kzalloc(&pdev->dev, sizeof(*sp), GFP_KERNEL);
-	if (!sp)
-		return -ENOMEM;
-
-	sp->node = of_node_get(pdev->dev.of_node);
-	sp->data = data;
-	sp->sync_conf = &stm32_sai_sync_conf_provider;
-
-	mutex_lock(&sync_mutex);
-	list_add(&sp->link, &sync_providers);
-	mutex_unlock(&sync_mutex);
-
-	return 0;
-}
-
-static void stm32_sai_sync_del_provider(struct device_node *np)
-{
-	struct sync_provider *sp;
-
-	mutex_lock(&sync_mutex);
-	list_for_each_entry(sp, &sync_providers, link) {
-		if (sp->node == np) {
-			list_del(&sp->link);
-			of_node_put(sp->node);
-			break;
-		}
-	}
-	mutex_unlock(&sync_mutex);
+	return stm32_sai_sync_conf_provider(sai_provider, synco);
 }
 
 static int stm32_sai_probe(struct platform_device *pdev)
 {
-	struct device_node *np = pdev->dev.of_node;
 	struct stm32_sai_data *sai;
 	struct reset_control *rst;
 	struct resource *res;
 	const struct of_device_id *of_id;
-	int ret;
 
 	sai = devm_kzalloc(&pdev->dev, sizeof(*sai), GFP_KERNEL);
 	if (!sai)
@@ -231,28 +179,11 @@ static int stm32_sai_probe(struct platform_device *pdev)
 		reset_control_deassert(rst);
 	}
 
-	ret = stm32_sai_sync_add_provider(pdev, sai);
-	if (ret < 0)
-		return ret;
-	sai->set_sync = &stm32_sai_set_sync;
-
 	sai->pdev = pdev;
+	sai->set_sync = &stm32_sai_set_sync;
 	platform_set_drvdata(pdev, sai);
 
-	ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
-	if (ret < 0)
-		stm32_sai_sync_del_provider(np);
-
-	return ret;
-}
-
-static int stm32_sai_remove(struct platform_device *pdev)
-{
-	of_platform_depopulate(&pdev->dev);
-
-	stm32_sai_sync_del_provider(pdev->dev.of_node);
-
-	return 0;
+	return devm_of_platform_populate(&pdev->dev);
 }
 
 MODULE_DEVICE_TABLE(of, stm32_sai_ids);
@@ -263,7 +194,6 @@ static struct platform_driver stm32_sai_driver = {
 		.of_match_table = stm32_sai_ids,
 	},
 	.probe = stm32_sai_probe,
-	.remove = stm32_sai_remove,
 };
 
 module_platform_driver(stm32_sai_driver);
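
With the provider registry removed, the sync provider is looked up on demand
from its device_node via of_find_device_by_node() plus platform_get_drvdata(),
and the child SAI sub-blocks are populated with the device-managed helper,
which is why the driver can drop its .remove callback. Stripped of error
handling, the probe path now reduces to roughly the following (a sketch of
what the hunk above already contains, not additional driver code):

/*
 * Minimal shape of the simplified stm32_sai_probe(): once child
 * population is device-managed there is nothing left to undo by hand,
 * so no .remove callback is needed for it.
 */
static int example_probe(struct platform_device *pdev)
{
	struct stm32_sai_data *sai;

	sai = devm_kzalloc(&pdev->dev, sizeof(*sai), GFP_KERNEL);
	if (!sai)
		return -ENOMEM;

	sai->pdev = pdev;
	sai->set_sync = &stm32_sai_set_sync;
	platform_set_drvdata(pdev, sai);

	/* children are torn down automatically when the device goes away */
	return devm_of_platform_populate(&pdev->dev);
}
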
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index 5da4efe7a5505bb90fef17f13e7b73c1529cd7b5..886281673972898cee7950d32ca0ce9c2804039a 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -590,12 +590,28 @@ static int sun4i_codec_hw_params(struct snd_pcm_substream *substream,
 					     hwrate);
 }
 
+
+static unsigned int sun4i_codec_src_rates[] = {
+	8000, 11025, 12000, 16000, 22050, 24000, 32000,
+	44100, 48000, 96000, 192000
+};
+
+
+static struct snd_pcm_hw_constraint_list sun4i_codec_constraints = {
+	.count  = ARRAY_SIZE(sun4i_codec_src_rates),
+	.list   = sun4i_codec_src_rates,
+};
+
+
 static int sun4i_codec_startup(struct snd_pcm_substream *substream,
 			       struct snd_soc_dai *dai)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct sun4i_codec *scodec = snd_soc_card_get_drvdata(rtd->card);
 
+	snd_pcm_hw_constraint_list(substream->runtime, 0,
+				SNDRV_PCM_HW_PARAM_RATE, &sun4i_codec_constraints);
+
 	/*
 	 * Stop issuing DRQ when we have room for less than 16 samples
 	 * in our TX FIFO
@@ -633,9 +649,7 @@ static struct snd_soc_dai_driver sun4i_codec_dai = {
 		.channels_max	= 2,
 		.rate_min	= 8000,
 		.rate_max	= 192000,
-		.rates		= SNDRV_PCM_RATE_8000_48000 |
-				  SNDRV_PCM_RATE_96000 |
-				  SNDRV_PCM_RATE_192000,
+		.rates		= SNDRV_PCM_RATE_CONTINUOUS,
 		.formats	= SNDRV_PCM_FMTBIT_S16_LE |
 				  SNDRV_PCM_FMTBIT_S32_LE,
 		.sig_bits	= 24,
@@ -645,11 +659,8 @@ static struct snd_soc_dai_driver sun4i_codec_dai = {
 		.channels_min	= 1,
 		.channels_max	= 2,
 		.rate_min	= 8000,
-		.rate_max	= 192000,
-		.rates		= SNDRV_PCM_RATE_8000_48000 |
-				  SNDRV_PCM_RATE_96000 |
-				  SNDRV_PCM_RATE_192000 |
-				  SNDRV_PCM_RATE_KNOT,
+		.rate_max	= 48000,
+		.rates		= SNDRV_PCM_RATE_CONTINUOUS,
 		.formats	= SNDRV_PCM_FMTBIT_S16_LE |
 				  SNDRV_PCM_FMTBIT_S32_LE,
 		.sig_bits	= 24,
@@ -1128,7 +1139,7 @@ static const struct snd_soc_component_driver sun4i_codec_component = {
 	.name = "sun4i-codec",
 };
 
-#define SUN4I_CODEC_RATES	SNDRV_PCM_RATE_8000_192000
+#define SUN4I_CODEC_RATES	SNDRV_PCM_RATE_CONTINUOUS
 #define SUN4I_CODEC_FORMATS	(SNDRV_PCM_FMTBIT_S16_LE | \
 				 SNDRV_PCM_FMTBIT_S32_LE)
 
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
index 04f92583a9696569aeac2a92084643a94cbe57d6..dca1143c1150ac58aa148a49db54c33226e366c8 100644
--- a/sound/soc/sunxi/sun4i-i2s.c
+++ b/sound/soc/sunxi/sun4i-i2s.c
@@ -269,10 +269,11 @@ static bool sun4i_i2s_oversample_is_valid(unsigned int oversample)
 	return false;
 }
 
-static int sun4i_i2s_set_clk_rate(struct sun4i_i2s *i2s,
+static int sun4i_i2s_set_clk_rate(struct snd_soc_dai *dai,
 				  unsigned int rate,
 				  unsigned int word_size)
 {
+	struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
 	unsigned int oversample_rate, clk_rate;
 	int bclk_div, mclk_div;
 	int ret;
@@ -300,6 +301,7 @@ static int sun4i_i2s_set_clk_rate(struct sun4i_i2s *i2s,
 		break;
 
 	default:
+		dev_err(dai->dev, "Unsupported sample rate: %u\n", rate);
 		return -EINVAL;
 	}
 
@@ -308,18 +310,25 @@ static int sun4i_i2s_set_clk_rate(struct sun4i_i2s *i2s,
 		return ret;
 
 	oversample_rate = i2s->mclk_freq / rate;
-	if (!sun4i_i2s_oversample_is_valid(oversample_rate))
+	if (!sun4i_i2s_oversample_is_valid(oversample_rate)) {
+		dev_err(dai->dev, "Unsupported oversample rate: %d\n",
+			oversample_rate);
 		return -EINVAL;
+	}
 
 	bclk_div = sun4i_i2s_get_bclk_div(i2s, oversample_rate,
 					  word_size);
-	if (bclk_div < 0)
+	if (bclk_div < 0) {
+		dev_err(dai->dev, "Unsupported BCLK divider: %d\n", bclk_div);
 		return -EINVAL;
+	}
 
 	mclk_div = sun4i_i2s_get_mclk_div(i2s, oversample_rate,
 					  clk_rate, rate);
-	if (mclk_div < 0)
+	if (mclk_div < 0) {
+		dev_err(dai->dev, "Unsupported MCLK divider: %d\n", mclk_div);
 		return -EINVAL;
+	}
 
 	/* Adjust the clock division values if needed */
 	bclk_div += i2s->variant->bclk_offset;
@@ -349,8 +358,11 @@ static int sun4i_i2s_hw_params(struct snd_pcm_substream *substream,
 	u32 width;
 
 	channels = params_channels(params);
-	if (channels != 2)
+	if (channels != 2) {
+		dev_err(dai->dev, "Unsupported number of channels: %d\n",
+			channels);
 		return -EINVAL;
+	}
 
 	if (i2s->variant->has_chcfg) {
 		regmap_update_bits(i2s->regmap, SUN8I_I2S_CHAN_CFG_REG,
@@ -382,6 +394,8 @@ static int sun4i_i2s_hw_params(struct snd_pcm_substream *substream,
 		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
 		break;
 	default:
+		dev_err(dai->dev, "Unsupported physical sample width: %d\n",
+			params_physical_width(params));
 		return -EINVAL;
 	}
 	i2s->playback_dma_data.addr_width = width;
@@ -393,6 +407,8 @@ static int sun4i_i2s_hw_params(struct snd_pcm_substream *substream,
 		break;
 
 	default:
+		dev_err(dai->dev, "Unsupported sample width: %d\n",
+			params_width(params));
 		return -EINVAL;
 	}
 
@@ -401,7 +417,7 @@ static int sun4i_i2s_hw_params(struct snd_pcm_substream *substream,
 	regmap_field_write(i2s->field_fmt_sr,
 			   sr + i2s->variant->fmt_offset);
 
-	return sun4i_i2s_set_clk_rate(i2s, params_rate(params),
+	return sun4i_i2s_set_clk_rate(dai, params_rate(params),
 				      params_width(params));
 }
 
@@ -426,6 +442,8 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 		val = SUN4I_I2S_FMT0_FMT_RIGHT_J;
 		break;
 	default:
+		dev_err(dai->dev, "Unsupported format: %d\n",
+			fmt & SND_SOC_DAIFMT_FORMAT_MASK);
 		return -EINVAL;
 	}
 
@@ -464,6 +482,8 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 	case SND_SOC_DAIFMT_NB_NF:
 		break;
 	default:
+		dev_err(dai->dev, "Unsupported clock polarity: %d\n",
+			fmt & SND_SOC_DAIFMT_INV_MASK);
 		return -EINVAL;
 	}
 
@@ -482,6 +502,8 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 			val = SUN4I_I2S_CTRL_MODE_SLAVE;
 			break;
 		default:
+			dev_err(dai->dev, "Unsupported slave setting: %d\n",
+				fmt & SND_SOC_DAIFMT_MASTER_MASK);
 			return -EINVAL;
 		}
 		regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
@@ -504,6 +526,8 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 			val = 0;
 			break;
 		default:
+			dev_err(dai->dev, "Unsupported slave setting: %d\n",
+				fmt & SND_SOC_DAIFMT_MASTER_MASK);
 			return -EINVAL;
 		}
 		regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
@@ -897,6 +921,23 @@ static const struct sun4i_i2s_quirks sun6i_a31_i2s_quirks = {
 	.field_rxchansel	= REG_FIELD(SUN4I_I2S_RX_CHAN_SEL_REG, 0, 2),
 };
 
+static const struct sun4i_i2s_quirks sun8i_a83t_i2s_quirks = {
+	.has_reset		= true,
+	.reg_offset_txdata	= SUN8I_I2S_FIFO_TX_REG,
+	.sun4i_i2s_regmap	= &sun4i_i2s_regmap_config,
+	.field_clkdiv_mclk_en	= REG_FIELD(SUN4I_I2S_CLK_DIV_REG, 7, 7),
+	.field_fmt_wss		= REG_FIELD(SUN4I_I2S_FMT0_REG, 2, 3),
+	.field_fmt_sr		= REG_FIELD(SUN4I_I2S_FMT0_REG, 4, 5),
+	.field_fmt_bclk		= REG_FIELD(SUN4I_I2S_FMT0_REG, 6, 6),
+	.field_fmt_lrclk	= REG_FIELD(SUN4I_I2S_FMT0_REG, 7, 7),
+	.has_slave_select_bit	= true,
+	.field_fmt_mode		= REG_FIELD(SUN4I_I2S_FMT0_REG, 0, 1),
+	.field_txchanmap	= REG_FIELD(SUN4I_I2S_TX_CHAN_MAP_REG, 0, 31),
+	.field_rxchanmap	= REG_FIELD(SUN4I_I2S_RX_CHAN_MAP_REG, 0, 31),
+	.field_txchansel	= REG_FIELD(SUN4I_I2S_TX_CHAN_SEL_REG, 0, 2),
+	.field_rxchansel	= REG_FIELD(SUN4I_I2S_RX_CHAN_SEL_REG, 0, 2),
+};
+
 static const struct sun4i_i2s_quirks sun8i_h3_i2s_quirks = {
 	.has_reset		= true,
 	.reg_offset_txdata	= SUN8I_I2S_FIFO_TX_REG,
@@ -1120,6 +1161,10 @@ static const struct of_device_id sun4i_i2s_match[] = {
 		.compatible = "allwinner,sun6i-a31-i2s",
 		.data = &sun6i_a31_i2s_quirks,
 	},
+	{
+		.compatible = "allwinner,sun8i-a83t-i2s",
+		.data = &sun8i_a83t_i2s_quirks,
+	},
 	{
 		.compatible = "allwinner,sun8i-h3-i2s",
 		.data = &sun8i_h3_i2s_quirks,
diff --git a/sound/soc/uniphier/Kconfig b/sound/soc/uniphier/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..02886a457eafa3638fdd30805dfc518929170637
--- /dev/null
+++ b/sound/soc/uniphier/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+config SND_SOC_UNIPHIER
+	tristate "ASoC support for UniPhier"
+	depends on (ARCH_UNIPHIER || COMPILE_TEST)
+	help
+	  Say Y or M if you want to add support for the Socionext
+	  UniPhier SoC audio interfaces. You will also need to select the
+	  audio interfaces to support below.
+	  If unsure select "N".
+
+config SND_SOC_UNIPHIER_EVEA_CODEC
+	tristate "UniPhier SoC internal audio codec"
+	depends on SND_SOC_UNIPHIER
+	select REGMAP_MMIO
+	help
+	  This adds the codec driver for the Socionext UniPhier LD11/20 SoC
+	  internal DAC. This driver supports Line In / Out and Headphone.
+	  Select Y if you use such device.
+	  If unsure select "N".
diff --git a/sound/soc/uniphier/Makefile b/sound/soc/uniphier/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..3be00d72f5e562556770377640e98ba43dbd06bd
--- /dev/null
+++ b/sound/soc/uniphier/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+snd-soc-uniphier-evea-objs := evea.o
+obj-$(CONFIG_SND_SOC_UNIPHIER_EVEA_CODEC) += snd-soc-uniphier-evea.o
diff --git a/sound/soc/uniphier/evea.c b/sound/soc/uniphier/evea.c
new file mode 100644
index 0000000000000000000000000000000000000000..0cc9efff1d9a557c08fac3dcb30dd553f429070d
--- /dev/null
+++ b/sound/soc/uniphier/evea.c
@@ -0,0 +1,567 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Socionext UniPhier EVEA ADC/DAC codec driver.
+ *
+ * Copyright (c) 2016-2017 Socionext Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+#define DRV_NAME        "evea"
+#define EVEA_RATES      SNDRV_PCM_RATE_48000
+#define EVEA_FORMATS    SNDRV_PCM_FMTBIT_S32_LE
+
+#define AADCPOW(n)                           (0x0078 + 0x04 * (n))
+#define   AADCPOW_AADC_POWD                   BIT(0)
+#define AHPOUTPOW                            0x0098
+#define   AHPOUTPOW_HP_ON                     BIT(4)
+#define ALINEPOW                             0x009c
+#define   ALINEPOW_LIN2_POWD                  BIT(3)
+#define   ALINEPOW_LIN1_POWD                  BIT(4)
+#define ALO1OUTPOW                           0x00a8
+#define   ALO1OUTPOW_LO1_ON                   BIT(4)
+#define ALO2OUTPOW                           0x00ac
+#define   ALO2OUTPOW_ADAC2_MUTE               BIT(0)
+#define   ALO2OUTPOW_LO2_ON                   BIT(4)
+#define AANAPOW                              0x00b8
+#define   AANAPOW_A_POWD                      BIT(4)
+#define ADACSEQ1(n)                          (0x0144 + 0x40 * (n))
+#define   ADACSEQ1_MMUTE                      BIT(1)
+#define ADACSEQ2(n)                          (0x0160 + 0x40 * (n))
+#define   ADACSEQ2_ADACIN_FIX                 BIT(0)
+#define ADAC1ODC                             0x0200
+#define   ADAC1ODC_HP_DIS_RES_MASK            GENMASK(2, 1)
+#define   ADAC1ODC_HP_DIS_RES_OFF             (0x0 << 1)
+#define   ADAC1ODC_HP_DIS_RES_ON              (0x3 << 1)
+#define   ADAC1ODC_ADAC_RAMPCLT_MASK          GENMASK(8, 7)
+#define   ADAC1ODC_ADAC_RAMPCLT_NORMAL        (0x0 << 7)
+#define   ADAC1ODC_ADAC_RAMPCLT_REDUCE        (0x1 << 7)
+
+struct evea_priv {
+	struct clk *clk, *clk_exiv;
+	struct reset_control *rst, *rst_exiv, *rst_adamv;
+	struct regmap *regmap;
+
+	int switch_lin;
+	int switch_lo;
+	int switch_hp;
+};
+
+static const struct snd_soc_dapm_widget evea_widgets[] = {
+	SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_INPUT("LIN1_LP"),
+	SND_SOC_DAPM_INPUT("LIN1_RP"),
+	SND_SOC_DAPM_INPUT("LIN2_LP"),
+	SND_SOC_DAPM_INPUT("LIN2_RP"),
+	SND_SOC_DAPM_INPUT("LIN3_LP"),
+	SND_SOC_DAPM_INPUT("LIN3_RP"),
+
+	SND_SOC_DAPM_DAC("DAC", "Playback", SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_OUTPUT("HP1_L"),
+	SND_SOC_DAPM_OUTPUT("HP1_R"),
+	SND_SOC_DAPM_OUTPUT("LO2_L"),
+	SND_SOC_DAPM_OUTPUT("LO2_R"),
+};
+
+static const struct snd_soc_dapm_route evea_routes[] = {
+	{ "ADC", NULL, "LIN1_LP" },
+	{ "ADC", NULL, "LIN1_RP" },
+	{ "ADC", NULL, "LIN2_LP" },
+	{ "ADC", NULL, "LIN2_RP" },
+	{ "ADC", NULL, "LIN3_LP" },
+	{ "ADC", NULL, "LIN3_RP" },
+
+	{ "HP1_L", NULL, "DAC" },
+	{ "HP1_R", NULL, "DAC" },
+	{ "LO2_L", NULL, "DAC" },
+	{ "LO2_R", NULL, "DAC" },
+};
+
+static void evea_set_power_state_on(struct evea_priv *evea)
+{
+	struct regmap *map = evea->regmap;
+
+	regmap_update_bits(map, AANAPOW, AANAPOW_A_POWD,
+			   AANAPOW_A_POWD);
+
+	regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
+			   ADAC1ODC_HP_DIS_RES_ON);
+
+	regmap_update_bits(map, ADAC1ODC, ADAC1ODC_ADAC_RAMPCLT_MASK,
+			   ADAC1ODC_ADAC_RAMPCLT_REDUCE);
+
+	regmap_update_bits(map, ADACSEQ2(0), ADACSEQ2_ADACIN_FIX, 0);
+	regmap_update_bits(map, ADACSEQ2(1), ADACSEQ2_ADACIN_FIX, 0);
+	regmap_update_bits(map, ADACSEQ2(2), ADACSEQ2_ADACIN_FIX, 0);
+}
+
+static void evea_set_power_state_off(struct evea_priv *evea)
+{
+	struct regmap *map = evea->regmap;
+
+	regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
+			   ADAC1ODC_HP_DIS_RES_ON);
+
+	regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE,
+			   ADACSEQ1_MMUTE);
+	regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE,
+			   ADACSEQ1_MMUTE);
+	regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE,
+			   ADACSEQ1_MMUTE);
+
+	regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON, 0);
+	regmap_update_bits(map, ALO2OUTPOW, ALO2OUTPOW_LO2_ON, 0);
+	regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON, 0);
+}
+
+static int evea_update_switch_lin(struct evea_priv *evea)
+{
+	struct regmap *map = evea->regmap;
+
+	if (evea->switch_lin) {
+		regmap_update_bits(map, ALINEPOW,
+				   ALINEPOW_LIN2_POWD | ALINEPOW_LIN1_POWD,
+				   ALINEPOW_LIN2_POWD | ALINEPOW_LIN1_POWD);
+
+		regmap_update_bits(map, AADCPOW(0), AADCPOW_AADC_POWD,
+				   AADCPOW_AADC_POWD);
+		regmap_update_bits(map, AADCPOW(1), AADCPOW_AADC_POWD,
+				   AADCPOW_AADC_POWD);
+	} else {
+		regmap_update_bits(map, AADCPOW(0), AADCPOW_AADC_POWD, 0);
+		regmap_update_bits(map, AADCPOW(1), AADCPOW_AADC_POWD, 0);
+
+		regmap_update_bits(map, ALINEPOW,
+				   ALINEPOW_LIN2_POWD | ALINEPOW_LIN1_POWD, 0);
+	}
+
+	return 0;
+}
+
+static int evea_update_switch_lo(struct evea_priv *evea)
+{
+	struct regmap *map = evea->regmap;
+
+	if (evea->switch_lo) {
+		regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE, 0);
+		regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE, 0);
+
+		regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON,
+				   ALO1OUTPOW_LO1_ON);
+		regmap_update_bits(map, ALO2OUTPOW,
+				   ALO2OUTPOW_ADAC2_MUTE | ALO2OUTPOW_LO2_ON,
+				   ALO2OUTPOW_ADAC2_MUTE | ALO2OUTPOW_LO2_ON);
+	} else {
+		regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE,
+				   ADACSEQ1_MMUTE);
+		regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE,
+				   ADACSEQ1_MMUTE);
+
+		regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON, 0);
+		regmap_update_bits(map, ALO2OUTPOW,
+				   ALO2OUTPOW_ADAC2_MUTE | ALO2OUTPOW_LO2_ON,
+				   0);
+	}
+
+	return 0;
+}
+
+static int evea_update_switch_hp(struct evea_priv *evea)
+{
+	struct regmap *map = evea->regmap;
+
+	if (evea->switch_hp) {
+		regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE, 0);
+
+		regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON,
+				   AHPOUTPOW_HP_ON);
+
+		regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
+				   ADAC1ODC_HP_DIS_RES_OFF);
+	} else {
+		regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
+				   ADAC1ODC_HP_DIS_RES_ON);
+
+		regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE,
+				   ADACSEQ1_MMUTE);
+
+		regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON, 0);
+	}
+
+	return 0;
+}
+
+static void evea_update_switch_all(struct evea_priv *evea)
+{
+	evea_update_switch_lin(evea);
+	evea_update_switch_lo(evea);
+	evea_update_switch_hp(evea);
+}
+
+static int evea_get_switch_lin(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = evea->switch_lin;
+
+	return 0;
+}
+
+static int evea_set_switch_lin(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+	if (evea->switch_lin == ucontrol->value.integer.value[0])
+		return 0;
+
+	evea->switch_lin = ucontrol->value.integer.value[0];
+
+	return evea_update_switch_lin(evea);
+}
+
+static int evea_get_switch_lo(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = evea->switch_lo;
+
+	return 0;
+}
+
+static int evea_set_switch_lo(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+	if (evea->switch_lo == ucontrol->value.integer.value[0])
+		return 0;
+
+	evea->switch_lo = ucontrol->value.integer.value[0];
+
+	return evea_update_switch_lo(evea);
+}
+
+static int evea_get_switch_hp(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = evea->switch_hp;
+
+	return 0;
+}
+
+static int evea_set_switch_hp(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+	if (evea->switch_hp == ucontrol->value.integer.value[0])
+		return 0;
+
+	evea->switch_hp = ucontrol->value.integer.value[0];
+
+	return evea_update_switch_hp(evea);
+}
+
+static const struct snd_kcontrol_new eva_controls[] = {
+	SOC_SINGLE_BOOL_EXT("Line Capture Switch", 0,
+			    evea_get_switch_lin, evea_set_switch_lin),
+	SOC_SINGLE_BOOL_EXT("Line Playback Switch", 0,
+			    evea_get_switch_lo, evea_set_switch_lo),
+	SOC_SINGLE_BOOL_EXT("Headphone Playback Switch", 0,
+			    evea_get_switch_hp, evea_set_switch_hp),
+};
+
+static int evea_codec_probe(struct snd_soc_codec *codec)
+{
+	struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+	evea->switch_lin = 1;
+	evea->switch_lo = 1;
+	evea->switch_hp = 1;
+
+	evea_set_power_state_on(evea);
+	evea_update_switch_all(evea);
+
+	return 0;
+}
+
+static int evea_codec_suspend(struct snd_soc_codec *codec)
+{
+	struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+	evea_set_power_state_off(evea);
+
+	reset_control_assert(evea->rst_adamv);
+	reset_control_assert(evea->rst_exiv);
+	reset_control_assert(evea->rst);
+
+	clk_disable_unprepare(evea->clk_exiv);
+	clk_disable_unprepare(evea->clk);
+
+	return 0;
+}
+
+static int evea_codec_resume(struct snd_soc_codec *codec)
+{
+	struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+	int ret;
+
+	ret = clk_prepare_enable(evea->clk);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(evea->clk_exiv);
+	if (ret)
+		goto err_out_clock;
+
+	ret = reset_control_deassert(evea->rst);
+	if (ret)
+		goto err_out_clock_exiv;
+
+	ret = reset_control_deassert(evea->rst_exiv);
+	if (ret)
+		goto err_out_reset;
+
+	ret = reset_control_deassert(evea->rst_adamv);
+	if (ret)
+		goto err_out_reset_exiv;
+
+	evea_set_power_state_on(evea);
+	evea_update_switch_all(evea);
+
+	return 0;
+
+err_out_reset_exiv:
+	reset_control_assert(evea->rst_exiv);
+
+err_out_reset:
+	reset_control_assert(evea->rst);
+
+err_out_clock_exiv:
+	clk_disable_unprepare(evea->clk_exiv);
+
+err_out_clock:
+	clk_disable_unprepare(evea->clk);
+
+	return ret;
+}
+
+static struct snd_soc_codec_driver soc_codec_evea = {
+	.probe   = evea_codec_probe,
+	.suspend = evea_codec_suspend,
+	.resume  = evea_codec_resume,
+
+	.component_driver = {
+		.dapm_widgets = evea_widgets,
+		.num_dapm_widgets = ARRAY_SIZE(evea_widgets),
+		.dapm_routes = evea_routes,
+		.num_dapm_routes = ARRAY_SIZE(evea_routes),
+		.controls = eva_controls,
+		.num_controls = ARRAY_SIZE(eva_controls),
+	},
+};
+
+static struct snd_soc_dai_driver soc_dai_evea[] = {
+	{
+		.name     = DRV_NAME "-line1",
+		.playback = {
+			.stream_name  = "Line Out 1",
+			.formats      = EVEA_FORMATS,
+			.rates        = EVEA_RATES,
+			.channels_min = 2,
+			.channels_max = 2,
+		},
+		.capture = {
+			.stream_name  = "Line In 1",
+			.formats      = EVEA_FORMATS,
+			.rates        = EVEA_RATES,
+			.channels_min = 2,
+			.channels_max = 2,
+		},
+	},
+	{
+		.name     = DRV_NAME "-hp1",
+		.playback = {
+			.stream_name  = "Headphone 1",
+			.formats      = EVEA_FORMATS,
+			.rates        = EVEA_RATES,
+			.channels_min = 2,
+			.channels_max = 2,
+		},
+	},
+	{
+		.name     = DRV_NAME "-lo2",
+		.playback = {
+			.stream_name  = "Line Out 2",
+			.formats      = EVEA_FORMATS,
+			.rates        = EVEA_RATES,
+			.channels_min = 2,
+			.channels_max = 2,
+		},
+	},
+};
+
+static const struct regmap_config evea_regmap_config = {
+	.reg_bits      = 32,
+	.reg_stride    = 4,
+	.val_bits      = 32,
+	.max_register  = 0xffc,
+	.cache_type    = REGCACHE_NONE,
+};
+
+static int evea_probe(struct platform_device *pdev)
+{
+	struct evea_priv *evea;
+	struct resource *res;
+	void __iomem *preg;
+	int ret;
+
+	evea = devm_kzalloc(&pdev->dev, sizeof(struct evea_priv), GFP_KERNEL);
+	if (!evea)
+		return -ENOMEM;
+
+	evea->clk = devm_clk_get(&pdev->dev, "evea");
+	if (IS_ERR(evea->clk))
+		return PTR_ERR(evea->clk);
+
+	evea->clk_exiv = devm_clk_get(&pdev->dev, "exiv");
+	if (IS_ERR(evea->clk_exiv))
+		return PTR_ERR(evea->clk_exiv);
+
+	evea->rst = devm_reset_control_get_shared(&pdev->dev, "evea");
+	if (IS_ERR(evea->rst))
+		return PTR_ERR(evea->rst);
+
+	evea->rst_exiv = devm_reset_control_get_shared(&pdev->dev, "exiv");
+	if (IS_ERR(evea->rst_exiv))
+		return PTR_ERR(evea->rst_exiv);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	preg = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(preg))
+		return PTR_ERR(preg);
+
+	evea->regmap = devm_regmap_init_mmio(&pdev->dev, preg,
+					     &evea_regmap_config);
+	if (IS_ERR(evea->regmap))
+		return PTR_ERR(evea->regmap);
+
+	ret = clk_prepare_enable(evea->clk);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(evea->clk_exiv);
+	if (ret)
+		goto err_out_clock;
+
+	ret = reset_control_deassert(evea->rst);
+	if (ret)
+		goto err_out_clock_exiv;
+
+	ret = reset_control_deassert(evea->rst_exiv);
+	if (ret)
+		goto err_out_reset;
+
+	/* ADAMV will hangup if EXIV reset is asserted */
+	evea->rst_adamv = devm_reset_control_get_shared(&pdev->dev, "adamv");
+	if (IS_ERR(evea->rst_adamv)) {
+		ret = PTR_ERR(evea->rst_adamv);
+		goto err_out_reset_exiv;
+	}
+
+	ret = reset_control_deassert(evea->rst_adamv);
+	if (ret)
+		goto err_out_reset_exiv;
+
+	platform_set_drvdata(pdev, evea);
+
+	ret = snd_soc_register_codec(&pdev->dev, &soc_codec_evea,
+				     soc_dai_evea, ARRAY_SIZE(soc_dai_evea));
+	if (ret)
+		goto err_out_reset_adamv;
+
+	return 0;
+
+err_out_reset_adamv:
+	reset_control_assert(evea->rst_adamv);
+
+err_out_reset_exiv:
+	reset_control_assert(evea->rst_exiv);
+
+err_out_reset:
+	reset_control_assert(evea->rst);
+
+err_out_clock_exiv:
+	clk_disable_unprepare(evea->clk_exiv);
+
+err_out_clock:
+	clk_disable_unprepare(evea->clk);
+
+	return ret;
+}
+
+static int evea_remove(struct platform_device *pdev)
+{
+	struct evea_priv *evea = platform_get_drvdata(pdev);
+
+	snd_soc_unregister_codec(&pdev->dev);
+
+	reset_control_assert(evea->rst_adamv);
+	reset_control_assert(evea->rst_exiv);
+	reset_control_assert(evea->rst);
+
+	clk_disable_unprepare(evea->clk_exiv);
+	clk_disable_unprepare(evea->clk);
+
+	return 0;
+}
+
+static const struct of_device_id evea_of_match[] = {
+	{ .compatible = "socionext,uniphier-evea", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, evea_of_match);
+
+static struct platform_driver evea_codec_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = of_match_ptr(evea_of_match),
+	},
+	.probe  = evea_probe,
+	.remove = evea_remove,
+};
+module_platform_driver(evea_codec_driver);
+
+MODULE_AUTHOR("Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier EVEA codec driver");
+MODULE_LICENSE("GPL v2");
diff --git a/tools/arch/s390/include/uapi/asm/bpf_perf_event.h b/tools/arch/s390/include/uapi/asm/bpf_perf_event.h
index cefe7c7cd4f6f29fabd90e33495d7a6d8ac6dbdd..0a8e37a519f258e72317f39a87ac9cc60ad47f6f 100644
--- a/tools/arch/s390/include/uapi/asm/bpf_perf_event.h
+++ b/tools/arch/s390/include/uapi/asm/bpf_perf_event.h
@@ -2,7 +2,7 @@
 #ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
 #define _UAPI__ASM_BPF_PERF_EVENT_H__
 
-#include <asm/ptrace.h>
+#include "ptrace.h"
 
 typedef user_pt_regs bpf_user_pt_regs_t;
 
diff --git a/tools/arch/s390/include/uapi/asm/perf_regs.h b/tools/arch/s390/include/uapi/asm/perf_regs.h
new file mode 100644
index 0000000000000000000000000000000000000000..d17dd9e5d51638f08ee44ad3e94d840ea0fc568d
--- /dev/null
+++ b/tools/arch/s390/include/uapi/asm/perf_regs.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_S390_PERF_REGS_H
+#define _ASM_S390_PERF_REGS_H
+
+enum perf_event_s390_regs {
+	PERF_REG_S390_R0,
+	PERF_REG_S390_R1,
+	PERF_REG_S390_R2,
+	PERF_REG_S390_R3,
+	PERF_REG_S390_R4,
+	PERF_REG_S390_R5,
+	PERF_REG_S390_R6,
+	PERF_REG_S390_R7,
+	PERF_REG_S390_R8,
+	PERF_REG_S390_R9,
+	PERF_REG_S390_R10,
+	PERF_REG_S390_R11,
+	PERF_REG_S390_R12,
+	PERF_REG_S390_R13,
+	PERF_REG_S390_R14,
+	PERF_REG_S390_R15,
+	PERF_REG_S390_FP0,
+	PERF_REG_S390_FP1,
+	PERF_REG_S390_FP2,
+	PERF_REG_S390_FP3,
+	PERF_REG_S390_FP4,
+	PERF_REG_S390_FP5,
+	PERF_REG_S390_FP6,
+	PERF_REG_S390_FP7,
+	PERF_REG_S390_FP8,
+	PERF_REG_S390_FP9,
+	PERF_REG_S390_FP10,
+	PERF_REG_S390_FP11,
+	PERF_REG_S390_FP12,
+	PERF_REG_S390_FP13,
+	PERF_REG_S390_FP14,
+	PERF_REG_S390_FP15,
+	PERF_REG_S390_MASK,
+	PERF_REG_S390_PC,
+
+	PERF_REG_S390_MAX
+};
+
+#endif /* _ASM_S390_PERF_REGS_H */
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index e2450c8e88e6f13cfc0934b772880e80b6f18c37..a8c3a33dd185e213f68f97b8719c7ca20198783e 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -523,21 +523,23 @@ static int do_show(int argc, char **argv)
 				break;
 			p_err("can't get next map: %s%s", strerror(errno),
 			      errno == EINVAL ? " -- kernel too old?" : "");
-			return -1;
+			break;
 		}
 
 		fd = bpf_map_get_fd_by_id(id);
 		if (fd < 0) {
+			if (errno == ENOENT)
+				continue;
 			p_err("can't get map by id (%u): %s",
 			      id, strerror(errno));
-			return -1;
+			break;
 		}
 
 		err = bpf_obj_get_info_by_fd(fd, &info, &len);
 		if (err) {
 			p_err("can't get map info: %s", strerror(errno));
 			close(fd);
-			return -1;
+			break;
 		}
 
 		if (json_output)
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index ad619b96c27664300df9a78bc28958c4d8662a4b..dded77345bfb05fef1fd50a3f3e7f54d43ae5cb7 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -382,6 +382,8 @@ static int do_show(int argc, char **argv)
 
 		fd = bpf_prog_get_fd_by_id(id);
 		if (fd < 0) {
+			if (errno == ENOENT)
+				continue;
 			p_err("can't get prog by id (%u): %s",
 			      id, strerror(errno));
 			err = -1;
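
Both do_show() loops now tolerate objects that vanish between the
*_get_next_id() and *_get_fd_by_id() steps: an ENOENT just skips to the next
id, and other failures stop the walk instead of aborting the whole listing.
The underlying iteration pattern, reduced to a minimal sketch against the same
libbpf calls (the <bpf/bpf.h> include path is an assumption about how the
library is installed; error handling is trimmed):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/types.h>
#include <bpf/bpf.h>

/*
 * Walk all loaded maps by id.  A map can be unloaded between fetching
 * its id and opening an fd for it, so ENOENT is not an error here.
 */
static void walk_map_ids(void)
{
	__u32 id = 0;
	int fd;

	while (!bpf_map_get_next_id(id, &id)) {
		fd = bpf_map_get_fd_by_id(id);
		if (fd < 0) {
			if (errno == ENOENT)
				continue;	/* raced with an unload */
			fprintf(stderr, "id %u: %s\n", id, strerror(errno));
			break;
		}
		printf("map id %u\n", id);
		close(fd);
	}
}
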
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index 217cf6f95c366037ccd2ff3cedb1d61de22c616a..a5684d0968b4fd087905e659c0ce80bd170434c2 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -478,7 +478,7 @@ class Provider(object):
     @staticmethod
     def is_field_wanted(fields_filter, field):
         """Indicate whether field is valid according to fields_filter."""
-        if not fields_filter or fields_filter == "help":
+        if not fields_filter:
             return True
         return re.match(fields_filter, field) is not None
 
@@ -549,8 +549,8 @@ class TracepointProvider(Provider):
 
     def update_fields(self, fields_filter):
         """Refresh fields, applying fields_filter"""
-        self._fields = [field for field in self.get_available_fields()
-                        if self.is_field_wanted(fields_filter, field)]
+        self.fields = [field for field in self.get_available_fields()
+                       if self.is_field_wanted(fields_filter, field)]
 
     @staticmethod
     def get_online_cpus():
@@ -950,7 +950,8 @@ class Tui(object):
             curses.nocbreak()
             curses.endwin()
 
-    def get_all_gnames(self):
+    @staticmethod
+    def get_all_gnames():
         """Returns a list of (pid, gname) tuples of all running guests"""
         res = []
         try:
@@ -963,7 +964,7 @@ class Tui(object):
             # perform a sanity check before calling the more expensive
             # function to possibly extract the guest name
             if ' -name ' in line[1]:
-                res.append((line[0], self.get_gname_from_pid(line[0])))
+                res.append((line[0], Tui.get_gname_from_pid(line[0])))
         child.stdout.close()
 
         return res
@@ -984,7 +985,8 @@ class Tui(object):
         except Exception:
             self.screen.addstr(row + 1, 2, 'Not available')
 
-    def get_pid_from_gname(self, gname):
+    @staticmethod
+    def get_pid_from_gname(gname):
         """Fuzzy function to convert guest name to QEMU process pid.
 
         Returns a list of potential pids, can be empty if no match found.
@@ -992,7 +994,7 @@ class Tui(object):
 
         """
         pids = []
-        for line in self.get_all_gnames():
+        for line in Tui.get_all_gnames():
             if gname == line[1]:
                 pids.append(int(line[0]))
 
@@ -1090,15 +1092,16 @@ class Tui(object):
             # sort by totals
             return (0, -stats[x][0])
         total = 0.
-        for val in stats.values():
-            total += val[0]
+        for key in stats.keys():
+            if key.find('(') is -1:
+                total += stats[key][0]
         if self._sorting == SORT_DEFAULT:
             sortkey = sortCurAvg
         else:
             sortkey = sortTotal
+        tavg = 0
         for key in sorted(stats.keys(), key=sortkey):
-
-            if row >= self.screen.getmaxyx()[0]:
+            if row >= self.screen.getmaxyx()[0] - 1:
                 break
             values = stats[key]
             if not values[0] and not values[1]:
@@ -1110,9 +1113,15 @@ class Tui(object):
                 self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' %
                                    (key, values[0], values[0] * 100 / total,
                                     cur))
+                if cur is not '' and key.find('(') is -1:
+                    tavg += cur
             row += 1
         if row == 3:
             self.screen.addstr(4, 1, 'No matching events reported yet')
+        else:
+            self.screen.addstr(row, 1, '%-40s %10d        %8s' %
+                               ('Total', total, tavg if tavg else ''),
+                               curses.A_BOLD)
         self.screen.refresh()
 
     def show_msg(self, text):
@@ -1358,7 +1367,7 @@ class Tui(object):
                 if char == 'x':
                     self.update_drilldown()
                     # prevents display of current values on next refresh
-                    self.stats.get()
+                    self.stats.get(self._display_guests)
             except KeyboardInterrupt:
                 break
             except curses.error:
@@ -1451,16 +1460,13 @@ Press any other key to refresh statistics immediately.
         try:
             pids = Tui.get_pid_from_gname(val)
         except:
-            raise optparse.OptionValueError('Error while searching for guest '
-                                            '"{}", use "-p" to specify a pid '
-                                            'instead'.format(val))
+            sys.exit('Error while searching for guest "{}". Use "-p" to '
+                     'specify a pid instead?'.format(val))
         if len(pids) == 0:
-            raise optparse.OptionValueError('No guest by the name "{}" '
-                                            'found'.format(val))
+            sys.exit('Error: No guest by the name "{}" found'.format(val))
         if len(pids) > 1:
-            raise optparse.OptionValueError('Multiple processes found (pids: '
-                                            '{}) - use "-p" to specify a pid '
-                                            'instead'.format(" ".join(pids)))
+            sys.exit('Error: Multiple processes found (pids: {}). Use "-p" '
+                     'to specify the desired pid'.format(" ".join(pids)))
         parser.values.pid = pids[0]
 
     optparser = optparse.OptionParser(description=description_text,
@@ -1518,7 +1524,16 @@ Press any other key to refresh statistics immediately.
                          help='restrict statistics to guest by name',
                          callback=cb_guest_to_pid,
                          )
-    (options, _) = optparser.parse_args(sys.argv)
+    options, unkn = optparser.parse_args(sys.argv)
+    if len(unkn) != 1:
+        sys.exit('Error: Extra argument(s): ' + ' '.join(unkn[1:]))
+    try:
+        # verify that we were passed a valid regex up front
+        re.compile(options.fields)
+    except re.error:
+        sys.exit('Error: "' + options.fields + '" is not a valid regular '
+                 'expression')
+
     return options
 
 
@@ -1564,16 +1579,13 @@ def main():
 
     stats = Stats(options)
 
-    if options.fields == "help":
-        event_list = "\n"
-        s = stats.get()
-        for key in s.keys():
-            if key.find('(') != -1:
-                key = key[0:key.find('(')]
-            if event_list.find('\n' + key + '\n') == -1:
-                event_list += key + '\n'
-        sys.stdout.write(event_list)
-        return ""
+    if options.fields == 'help':
+        stats.fields_filter = None
+        event_list = []
+        for key in stats.get().keys():
+            event_list.append(key.split('(', 1)[0])
+        sys.stdout.write('  ' + '\n  '.join(sorted(set(event_list))) + '\n')
+        sys.exit(0)
 
     if options.log:
         log(stats)
diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt
index e5cf836be8a1848bb82f39cfa3c7c75dcc67b4fa..b5b3810c9e945d7f3a39568840fbc5b73f84983b 100644
--- a/tools/kvm/kvm_stat/kvm_stat.txt
+++ b/tools/kvm/kvm_stat/kvm_stat.txt
@@ -50,6 +50,8 @@ INTERACTIVE COMMANDS
 *s*::   set update interval
 
 *x*::	toggle reporting of stats for child trace events
+ ::     *Note*: The stats for the parents summarize the respective child trace
+                events
 
 Press any other key to refresh statistics immediately.
 
@@ -86,7 +88,7 @@ OPTIONS
 
 -f<fields>::
 --fields=<fields>::
-	fields to display (regex)
+	fields to display (regex), "-f help" for a list of available events
 
 -h::
 --help::
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 8acfc47af70efde4c1a3bb3ad6aff809f0ad0308..540a209b78ab3cd6ae3b972c57b338dc0aa9b58d 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -138,7 +138,7 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
 			*type = INSN_STACK;
 			op->src.type = OP_SRC_ADD;
 			op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
-			op->dest.type = OP_SRC_REG;
+			op->dest.type = OP_DEST_REG;
 			op->dest.reg = CFI_SP;
 		}
 		break;
diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c
index 4c6b5c9ef073b31c3a01dd0ebb85efbccb4bfee0..91e8e19ff5e06193adc55c455b4d97ad947da7ee 100644
--- a/tools/objtool/builtin-orc.c
+++ b/tools/objtool/builtin-orc.c
@@ -44,6 +44,9 @@ int cmd_orc(int argc, const char **argv)
 	const char *objname;
 
 	argc--; argv++;
+	if (argc <= 0)
+		usage_with_options(orc_usage, check_options);
+
 	if (!strncmp(argv[0], "gen", 3)) {
 		argc = parse_options(argc, argv, check_options, orc_usage, 0);
 		if (argc != 1)
@@ -52,7 +55,6 @@ int cmd_orc(int argc, const char **argv)
 		objname = argv[0];
 
 		return check(objname, no_fp, no_unreachable, true);
-
 	}
 
 	if (!strcmp(argv[0], "dump")) {
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
index e5ca31429c9bac29a9c66c4f8ddf97a94651690b..e61fe703197baa9b21814674c0250f6420e38c93 100644
--- a/tools/objtool/orc_gen.c
+++ b/tools/objtool/orc_gen.c
@@ -165,6 +165,8 @@ int create_orc_sections(struct objtool_file *file)
 
 	/* create .orc_unwind_ip and .rela.orc_unwind_ip sections */
 	sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx);
+	if (!sec)
+		return -1;
 
 	ip_relasec = elf_create_rela_section(file->elf, sec);
 	if (!ip_relasec)
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index ed65e82f034efe0e76cef718fdb8a9bd07511edb..0294bfb6c5f87c990500ee57e931034091d0da48 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -188,9 +188,7 @@ ifdef PYTHON_CONFIG
   PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
   PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
   PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
-  ifeq ($(CC_NO_CLANG), 1)
-    PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS))
-  endif
+  PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS))
   FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
 endif
 
@@ -576,14 +574,15 @@ ifndef NO_GTK2
   endif
 endif
 
-
 ifdef NO_LIBPERL
   CFLAGS += -DNO_LIBPERL
 else
   PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)
   PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))
   PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
-  PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
+  PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null)
+  PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS))
+  PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS))
   FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
 
   ifneq ($(feature-libperl), 1)
diff --git a/tools/perf/arch/s390/include/perf_regs.h b/tools/perf/arch/s390/include/perf_regs.h
index d2df54a6bc5a24a588ca3153833623ca0ecaa1c3..bcfbaed78cc257f15d9820238bc1ef58256f1c6a 100644
--- a/tools/perf/arch/s390/include/perf_regs.h
+++ b/tools/perf/arch/s390/include/perf_regs.h
@@ -3,7 +3,7 @@
 
 #include <stdlib.h>
 #include <linux/types.h>
-#include <../../../../arch/s390/include/uapi/asm/perf_regs.h>
+#include <asm/perf_regs.h>
 
 void perf_regs_load(u64 *regs);
 
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 6db9d809fe9722a9e4eb0afb5140443490aa43e3..3e64f10b6d6678810715e517e7354679b86efd8e 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -21,6 +21,7 @@ arch/x86/include/asm/cpufeatures.h
 arch/arm/include/uapi/asm/perf_regs.h
 arch/arm64/include/uapi/asm/perf_regs.h
 arch/powerpc/include/uapi/asm/perf_regs.h
+arch/s390/include/uapi/asm/perf_regs.h
 arch/x86/include/uapi/asm/perf_regs.h
 arch/x86/include/uapi/asm/kvm.h
 arch/x86/include/uapi/asm/kvm_perf.h
diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
index cf36de7ea25587907d50db9c44e3dae5c3746c1a..0c6d1002b524eaf62ef62cc32763b041b2f33ba1 100644
--- a/tools/perf/jvmti/jvmti_agent.c
+++ b/tools/perf/jvmti/jvmti_agent.c
@@ -384,13 +384,13 @@ jvmti_write_code(void *agent, char const *sym,
 }
 
 int
-jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
-		       jvmti_line_info_t *li, int nr_lines)
+jvmti_write_debug_info(void *agent, uint64_t code,
+    int nr_lines, jvmti_line_info_t *li,
+    const char * const * file_names)
 {
 	struct jr_code_debug_info rec;
-	size_t sret, len, size, flen;
+	size_t sret, len, size, flen = 0;
 	uint64_t addr;
-	const char *fn = file;
 	FILE *fp = agent;
 	int i;
 
@@ -405,7 +405,9 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
 		return -1;
 	}
 
-	flen = strlen(file) + 1;
+	for (i = 0; i < nr_lines; ++i)
+		flen += strlen(file_names[i]) + 1;
 
 	rec.p.id        = JIT_CODE_DEBUG_INFO;
 	size            = sizeof(rec);
@@ -421,7 +423,7 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
 	 * file[]   : source file name
 	 */
 	size += nr_lines * sizeof(struct debug_entry);
-	size += flen * nr_lines;
+	size += flen;
 	rec.p.total_size = size;
 
 	/*
@@ -452,7 +454,7 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
 		if (sret != 1)
 			goto error;
 
-		sret = fwrite_unlocked(fn, flen, 1, fp);
+		sret = fwrite_unlocked(file_names[i], strlen(file_names[i]) + 1, 1, fp);
 		if (sret != 1)
 			goto error;
 	}
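
The size computation in this hunk changes shape: previously one shared file name was counted once and then multiplied by nr_lines, whereas now each line record carries its own NUL-terminated name, so the individual lengths are summed. A minimal restatement of the new arithmetic, using the names from the hunk (illustrative sketch, not part of the patch):

	size_t flen = 0, size = sizeof(rec);
	int i;

	for (i = 0; i < nr_lines; ++i)
		flen += strlen(file_names[i]) + 1;	/* every name is stored NUL-terminated */

	size += nr_lines * sizeof(struct debug_entry);	/* one debug_entry per line record */
	size += flen;					/* followed by the per-line file names */
	rec.p.total_size = size;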
diff --git a/tools/perf/jvmti/jvmti_agent.h b/tools/perf/jvmti/jvmti_agent.h
index fe32d8344a823f56de37f3ecea53af4c2f78329f..6ed82f6c06ddd0ed85ee79a5015707753d4f38b9 100644
--- a/tools/perf/jvmti/jvmti_agent.h
+++ b/tools/perf/jvmti/jvmti_agent.h
@@ -14,6 +14,7 @@ typedef struct {
 	unsigned long	pc;
 	int		line_number;
 	int		discrim; /* discriminator -- 0 for now */
+	jmethodID	methodID;
 } jvmti_line_info_t;
 
 void *jvmti_open(void);
@@ -22,11 +23,9 @@ int   jvmti_write_code(void *agent, char const *symbol_name,
 		       uint64_t vma, void const *code,
 		       const unsigned int code_size);
 
-int   jvmti_write_debug_info(void *agent,
-		             uint64_t code,
-			     const char *file,
+int   jvmti_write_debug_info(void *agent, uint64_t code, int nr_lines,
 			     jvmti_line_info_t *li,
-			     int nr_lines);
+			     const char * const * file_names);
 
 #if defined(__cplusplus)
 }
diff --git a/tools/perf/jvmti/libjvmti.c b/tools/perf/jvmti/libjvmti.c
index c62c9fc9a525995c83ef8fc8bf2f790d599abadc..6add3e9826141346a34a93b9e0a970a22720e361 100644
--- a/tools/perf/jvmti/libjvmti.c
+++ b/tools/perf/jvmti/libjvmti.c
@@ -47,6 +47,7 @@ do_get_line_numbers(jvmtiEnv *jvmti, void *pc, jmethodID m, jint bci,
 			tab[lines].pc = (unsigned long)pc;
 			tab[lines].line_number = loc_tab[i].line_number;
 			tab[lines].discrim = 0; /* not yet used */
+			tab[lines].methodID = m;
 			lines++;
 		} else {
 			break;
@@ -125,6 +126,99 @@ get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t **
 	return JVMTI_ERROR_NONE;
 }
 
+static void
+copy_class_filename(const char *class_sign, const char *file_name,
+		    char *result, size_t max_length)
+{
+	/*
+	 * Assume the path name mirrors the class hierarchy; this is a common
+	 * practice with Java programs.
+	 */
+	if (*class_sign == 'L') {
+		int j, i = 0;
+		char *p = strrchr(class_sign, '/');
+		if (p) {
+			/* drop the 'L' prefix and copy up to the final '/' */
+			for (i = 0; i < (p - class_sign); i++)
+				result[i] = class_sign[i+1];
+		}
+		/*
+		 * Append the file name; use loops rather than string ops to
+		 * avoid modifying class_sign, which is used later for the
+		 * symbol name.
+		 */
+		for (j = 0; i < (max_length - 1) && file_name && j < strlen(file_name); j++, i++)
+			result[i] = file_name[j];
+
+		result[i] = '\0';
+	} else {
+		/* fallback case */
+		strncpy(result, file_name, max_length - 1);
+		result[max_length - 1] = '\0';
+	}
+}
+
+static jvmtiError
+get_source_filename(jvmtiEnv *jvmti, jmethodID methodID, char ** buffer)
+{
+	jvmtiError ret;
+	jclass decl_class;
+	char *file_name = NULL;
+	char *class_sign = NULL;
+	char fn[PATH_MAX];
+	size_t len;
+
+	ret = (*jvmti)->GetMethodDeclaringClass(jvmti, methodID, &decl_class);
+	if (ret != JVMTI_ERROR_NONE) {
+		print_error(jvmti, "GetMethodDeclaringClass", ret);
+		return ret;
+	}
+
+	ret = (*jvmti)->GetSourceFileName(jvmti, decl_class, &file_name);
+	if (ret != JVMTI_ERROR_NONE) {
+		print_error(jvmti, "GetSourceFileName", ret);
+		return ret;
+	}
+
+	ret = (*jvmti)->GetClassSignature(jvmti, decl_class, &class_sign, NULL);
+	if (ret != JVMTI_ERROR_NONE) {
+		print_error(jvmti, "GetClassSignature", ret);
+		goto free_file_name_error;
+	}
+
+	copy_class_filename(class_sign, file_name, fn, PATH_MAX);
+	len = strlen(fn);
+	*buffer = malloc((len + 1) * sizeof(char));
+	if (!*buffer) {
+		warnx("jvmti: out of memory copying source file name");
+		ret = JVMTI_ERROR_OUT_OF_MEMORY;
+		goto free_class_sign_error;
+	}
+	strcpy(*buffer, fn);
+	ret = JVMTI_ERROR_NONE;
+
+free_class_sign_error:
+	(*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign);
+free_file_name_error:
+	(*jvmti)->Deallocate(jvmti, (unsigned char *)file_name);
+
+	return ret;
+}
+
+static jvmtiError
+fill_source_filenames(jvmtiEnv *jvmti, int nr_lines,
+		      const jvmti_line_info_t * line_tab,
+		      char ** file_names)
+{
+	int index;
+	jvmtiError ret;
+
+	for (index = 0; index < nr_lines; ++index) {
+		ret = get_source_filename(jvmti, line_tab[index].methodID, &(file_names[index]));
+		if (ret != JVMTI_ERROR_NONE)
+			return ret;
+	}
+
+	return JVMTI_ERROR_NONE;
+}
+
 static void JNICALL
 compiled_method_load_cb(jvmtiEnv *jvmti,
 			jmethodID method,
@@ -135,16 +229,18 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
 			const void *compile_info)
 {
 	jvmti_line_info_t *line_tab = NULL;
+	char ** line_file_names = NULL;
 	jclass decl_class;
 	char *class_sign = NULL;
 	char *func_name = NULL;
 	char *func_sign = NULL;
-	char *file_name= NULL;
+	char *file_name = NULL;
 	char fn[PATH_MAX];
 	uint64_t addr = (uint64_t)(uintptr_t)code_addr;
 	jvmtiError ret;
 	int nr_lines = 0; /* in line_tab[] */
 	size_t len;
+	int output_debug_info = 0;
 
 	ret = (*jvmti)->GetMethodDeclaringClass(jvmti, method,
 						&decl_class);
@@ -158,6 +254,19 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
 		if (ret != JVMTI_ERROR_NONE) {
 			warnx("jvmti: cannot get line table for method");
 			nr_lines = 0;
+		} else if (nr_lines > 0) {
+			line_file_names = malloc(sizeof(char*) * nr_lines);
+			if (!line_file_names) {
+				warnx("jvmti: cannot allocate space for line table method names");
+			} else {
+				memset(line_file_names, 0, sizeof(char*) * nr_lines);
+				ret = fill_source_filenames(jvmti, nr_lines, line_tab, line_file_names);
+				if (ret != JVMTI_ERROR_NONE) {
+					warnx("jvmti: fill_source_filenames failed");
+				} else {
+					output_debug_info = 1;
+				}
+			}
 		}
 	}
 
@@ -181,33 +290,14 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
 		goto error;
 	}
 
-	/*
-	 * Assume path name is class hierarchy, this is a common practice with Java programs
-	 */
-	if (*class_sign == 'L') {
-		int j, i = 0;
-		char *p = strrchr(class_sign, '/');
-		if (p) {
-			/* drop the 'L' prefix and copy up to the final '/' */
-			for (i = 0; i < (p - class_sign); i++)
-				fn[i] = class_sign[i+1];
-		}
-		/*
-		 * append file name, we use loops and not string ops to avoid modifying
-		 * class_sign which is used later for the symbol name
-		 */
-		for (j = 0; i < (PATH_MAX - 1) && file_name && j < strlen(file_name); j++, i++)
-			fn[i] = file_name[j];
-		fn[i] = '\0';
-	} else {
-		/* fallback case */
-		strcpy(fn, file_name);
-	}
+	copy_class_filename(class_sign, file_name, fn, PATH_MAX);
+
 	/*
 	 * write source line info record if we have it
 	 */
-	if (jvmti_write_debug_info(jvmti_agent, addr, fn, line_tab, nr_lines))
-		warnx("jvmti: write_debug_info() failed");
+	if (output_debug_info &&
+	    jvmti_write_debug_info(jvmti_agent, addr, nr_lines, line_tab,
+				   (const char * const *)line_file_names))
+		warnx("jvmti: write_debug_info() failed");
 
 	len = strlen(func_name) + strlen(class_sign) + strlen(func_sign) + 2;
 	{
@@ -223,6 +313,13 @@ error:
 	(*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign);
 	(*jvmti)->Deallocate(jvmti, (unsigned char *)file_name);
 	free(line_tab);
+	while (line_file_names && nr_lines > 0) {
+		nr_lines -= 1;
+		free(line_file_names[nr_lines]);
+	}
+	free(line_file_names);
 }
 
 static void JNICALL
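
To make the behaviour of the factored-out copy_class_filename() above concrete: it drops the leading 'L' of a JVM class signature, keeps the package path up to the final '/', and appends the plain source file name. A hypothetical call, with invented input values (illustration only, not part of the patch):

	char path[PATH_MAX];

	copy_class_filename("Ljava/util/HashMap;", "HashMap.java", path, PATH_MAX);
	/* path now contains "java/util/HashMap.java" */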
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 792af7c3b74f98029ae32953a26e342157be5632..9316e648a880db61cad82c399c30218695d7d215 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -11,7 +11,7 @@ ifneq ($(wildcard $(GENHDR)),)
 endif
 
 CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
-LDLIBS += -lcap -lelf
+LDLIBS += -lcap -lelf -lrt
 
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
 	test_align test_verifier_log test_dev_cgroup
@@ -39,7 +39,7 @@ $(BPFOBJ): force
 CLANG ?= clang
 LLC   ?= llc
 
-PROBE := $(shell llc -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
+PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
 
 # Let newer LLVM versions transparently probe the kernel for availability
 # of full BPF instruction set.
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 69427531408dd22ef1887d473ab4bf6548e173b9..6761be18a91fccc2d4f8ad52b0f83fec293189ae 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -351,7 +351,7 @@ static void test_bpf_obj_id(void)
 			  info_len != sizeof(struct bpf_map_info) ||
 			  strcmp((char *)map_infos[i].name, expected_map_name),
 			  "get-map-info(fd)",
-			  "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
+			  "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
 			  err, errno,
 			  map_infos[i].type, BPF_MAP_TYPE_ARRAY,
 			  info_len, sizeof(struct bpf_map_info),
@@ -395,7 +395,7 @@ static void test_bpf_obj_id(void)
 			  *(int *)prog_infos[i].map_ids != map_infos[i].id ||
 			  strcmp((char *)prog_infos[i].name, expected_prog_name),
 			  "get-prog-info(fd)",
-			  "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
+			  "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
 			  err, errno, i,
 			  prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
 			  info_len, sizeof(struct bpf_prog_info),
@@ -463,7 +463,7 @@ static void test_bpf_obj_id(void)
 		      memcmp(&prog_info, &prog_infos[i], info_len) ||
 		      *(int *)prog_info.map_ids != saved_map_id,
 		      "get-prog-info(next_id->fd)",
-		      "err %d errno %d info_len %u(%lu) memcmp %d map_id %u(%u)\n",
+		      "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
 		      err, errno, info_len, sizeof(struct bpf_prog_info),
 		      memcmp(&prog_info, &prog_infos[i], info_len),
 		      *(int *)prog_info.map_ids, saved_map_id);
@@ -509,7 +509,7 @@ static void test_bpf_obj_id(void)
 		      memcmp(&map_info, &map_infos[i], info_len) ||
 		      array_value != array_magic_value,
 		      "check get-map-info(next_id->fd)",
-		      "err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n",
+		      "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
 		      err, errno, info_len, sizeof(struct bpf_map_info),
 		      memcmp(&map_info, &map_infos[i], info_len),
 		      array_value, array_magic_value);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 3c64f30cf63cc2b6adb532a3b1f3201533193f7f..b51017404c62d0dc8198afdf035016f6e5e2fd0b 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -422,9 +422,7 @@ static struct bpf_test tests[] = {
 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
-		.errstr_unpriv = "R1 subtraction from stack pointer",
-		.result_unpriv = REJECT,
-		.errstr = "R1 invalid mem access",
+		.errstr = "R1 subtraction from stack pointer",
 		.result = REJECT,
 	},
 	{
@@ -606,7 +604,6 @@ static struct bpf_test tests[] = {
 		},
 		.errstr = "misaligned stack access",
 		.result = REJECT,
-		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
 	},
 	{
 		"invalid map_fd for function call",
@@ -1797,7 +1794,6 @@ static struct bpf_test tests[] = {
 		},
 		.result = REJECT,
 		.errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
-		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
 	},
 	{
 		"PTR_TO_STACK store/load - bad alignment on reg",
@@ -1810,7 +1806,6 @@ static struct bpf_test tests[] = {
 		},
 		.result = REJECT,
 		.errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
-		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
 	},
 	{
 		"PTR_TO_STACK store/load - out of bounds low",
@@ -1862,9 +1857,8 @@ static struct bpf_test tests[] = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
-		.result = ACCEPT,
-		.result_unpriv = REJECT,
-		.errstr_unpriv = "R1 pointer += pointer",
+		.result = REJECT,
+		.errstr = "R1 pointer += pointer",
 	},
 	{
 		"unpriv: neg pointer",
@@ -2592,7 +2586,8 @@ static struct bpf_test tests[] = {
 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
 				    offsetof(struct __sk_buff, data)),
 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
-			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, len)),
 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
@@ -2899,7 +2894,7 @@ static struct bpf_test tests[] = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
-		.errstr = "invalid access to packet",
+		.errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	},
@@ -3885,9 +3880,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map2 = { 3, 11 },
-		.errstr_unpriv = "R0 pointer += pointer",
-		.errstr = "R0 invalid mem access 'inv'",
-		.result_unpriv = REJECT,
+		.errstr = "R0 pointer += pointer",
 		.result = REJECT,
 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 	},
@@ -3928,7 +3921,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 4 },
-		.errstr = "R4 invalid mem access",
+		.errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
 	},
@@ -3949,7 +3942,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 4 },
-		.errstr = "R4 invalid mem access",
+		.errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
 	},
@@ -3970,7 +3963,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 4 },
-		.errstr = "R4 invalid mem access",
+		.errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
 	},
@@ -5195,10 +5188,8 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map2 = { 3 },
-		.errstr_unpriv = "R0 bitwise operator &= on pointer",
-		.errstr = "invalid mem access 'inv'",
+		.errstr = "R0 bitwise operator &= on pointer",
 		.result = REJECT,
-		.result_unpriv = REJECT,
 	},
 	{
 		"map element value illegal alu op, 2",
@@ -5214,10 +5205,8 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map2 = { 3 },
-		.errstr_unpriv = "R0 32-bit pointer arithmetic prohibited",
-		.errstr = "invalid mem access 'inv'",
+		.errstr = "R0 32-bit pointer arithmetic prohibited",
 		.result = REJECT,
-		.result_unpriv = REJECT,
 	},
 	{
 		"map element value illegal alu op, 3",
@@ -5233,10 +5222,8 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map2 = { 3 },
-		.errstr_unpriv = "R0 pointer arithmetic with /= operator",
-		.errstr = "invalid mem access 'inv'",
+		.errstr = "R0 pointer arithmetic with /= operator",
 		.result = REJECT,
-		.result_unpriv = REJECT,
 	},
 	{
 		"map element value illegal alu op, 4",
@@ -6019,8 +6006,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map_in_map = { 3 },
-		.errstr = "R1 type=inv expected=map_ptr",
-		.errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
+		.errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
 		.result = REJECT,
 	},
 	{
@@ -6116,6 +6102,30 @@ static struct bpf_test tests[] = {
 		},
 		.result = ACCEPT,
 	},
+	{
+		"ld_abs: tests on r6 and skb data reload helper",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_LD_ABS(BPF_B, 0),
+			BPF_LD_ABS(BPF_H, 0),
+			BPF_LD_ABS(BPF_W, 0),
+			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+			BPF_MOV64_IMM(BPF_REG_6, 0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+			BPF_MOV64_IMM(BPF_REG_2, 1),
+			BPF_MOV64_IMM(BPF_REG_3, 2),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_skb_vlan_push),
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+			BPF_LD_ABS(BPF_B, 0),
+			BPF_LD_ABS(BPF_H, 0),
+			BPF_LD_ABS(BPF_W, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 42),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+		.result = ACCEPT,
+	},
 	{
 		"ld_ind: check calling conv, r1",
 		.insns = {
@@ -6300,7 +6310,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6324,7 +6334,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6350,7 +6360,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R8 invalid mem access 'inv'",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6375,7 +6385,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R8 invalid mem access 'inv'",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6423,7 +6433,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6494,7 +6504,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6545,7 +6555,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6572,7 +6582,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6598,7 +6608,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6627,7 +6637,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6657,7 +6667,7 @@ static struct bpf_test tests[] = {
 			BPF_JMP_IMM(BPF_JA, 0, 0, -7),
 		},
 		.fixup_map1 = { 4 },
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 	},
 	{
@@ -6685,8 +6695,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 3 },
-		.errstr_unpriv = "R0 pointer comparison prohibited",
-		.errstr = "R0 min value is negative",
+		.errstr = "unbounded min value",
 		.result = REJECT,
 		.result_unpriv = REJECT,
 	},
@@ -6741,6 +6750,462 @@ static struct bpf_test tests[] = {
 		.errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
 		.result = REJECT,
 	},
+	{
+		"bounds check based on zero-extended MOV",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			/* r2 = 0x0000'0000'ffff'ffff */
+			BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
+			/* r2 = 0 */
+			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+			/* no-op */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+			/* access at offset 0 */
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			/* exit */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.result = ACCEPT
+	},
+	{
+		"bounds check based on sign-extended MOV. test1",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			/* r2 = 0xffff'ffff'ffff'ffff */
+			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+			/* r2 = 0xffff'ffff */
+			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
+			/* r0 = <oob pointer> */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+			/* access to OOB pointer */
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			/* exit */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "map_value pointer and 4294967295",
+		.result = REJECT
+	},
+	{
+		"bounds check based on sign-extended MOV. test2",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			/* r2 = 0xffff'ffff'ffff'ffff */
+			BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
+			/* r2 = 0xfff'ffff */
+			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
+			/* r0 = <oob pointer> */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+			/* access to OOB pointer */
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			/* exit */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "R0 min value is outside of the array range",
+		.result = REJECT
+	},
+	{
+		"bounds check based on reg_off + var_off + insn_off. test1",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 4 },
+		.errstr = "value_size=8 off=1073741825",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"bounds check based on reg_off + var_off + insn_off. test2",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 4 },
+		.errstr = "value 1073741823",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"bounds check after truncation of non-boundary-crossing range",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+			/* r1 = [0x00, 0xff] */
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_2, 1),
+			/* r2 = 0x10'0000'0000 */
+			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
+			/* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+			/* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+			/* r1 = [0x00, 0xff] */
+			BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
+			/* r1 = 0 */
+			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+			/* no-op */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			/* access at offset 0 */
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			/* exit */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.result = ACCEPT
+	},
+	{
+		"bounds check after truncation of boundary-crossing range (1)",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+			/* r1 = [0x00, 0xff] */
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+			/* r1 = [0xffff'ff80, 0xffff'ffff] or
+			 *      [0x0000'0000, 0x0000'007f]
+			 */
+			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
+			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+			/* r1 = [0x00, 0xff] or
+			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+			 */
+			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+			/* r1 = 0 or
+			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+			 */
+			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+			/* no-op or OOB pointer computation */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			/* potentially OOB access */
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			/* exit */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		/* not actually fully unbounded, but the bound is very high */
+		.errstr = "R0 unbounded memory access",
+		.result = REJECT
+	},
+	{
+		"bounds check after truncation of boundary-crossing range (2)",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+			/* r1 = [0x00, 0xff] */
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+			/* r1 = [0xffff'ff80, 0x1'0000'007f] */
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
+			/* r1 = [0xffff'ff80, 0xffff'ffff] or
+			 *      [0x0000'0000, 0x0000'007f]
+			 * difference to previous test: truncation via MOV32
+			 * instead of ALU32.
+			 */
+			BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
+			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+			/* r1 = [0x00, 0xff] or
+			 *      [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
+			 */
+			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
+			/* r1 = 0 or
+			 *      [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
+			 */
+			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+			/* no-op or OOB pointer computation */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			/* potentially OOB access */
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			/* exit */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		/* not actually fully unbounded, but the bound is very high */
+		.errstr = "R0 unbounded memory access",
+		.result = REJECT
+	},
+	{
+		"bounds check after wrapping 32-bit addition",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+			/* r1 = 0x7fff'ffff */
+			BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
+			/* r1 = 0xffff'fffe */
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+			/* r1 = 0 */
+			BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
+			/* no-op */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			/* access at offset 0 */
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			/* exit */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.result = ACCEPT
+	},
+	{
+		"bounds check after shift with oversized count operand",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+			BPF_MOV64_IMM(BPF_REG_2, 32),
+			BPF_MOV64_IMM(BPF_REG_1, 1),
+			/* r1 = (u32)1 << (u32)32 = ? */
+			BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
+			/* r1 = [0x0000, 0xffff] */
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
+			/* computes unknown pointer, potentially OOB */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			/* potentially OOB access */
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			/* exit */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "R0 max value is outside of the array range",
+		.result = REJECT
+	},
+	{
+		"bounds check after right shift of maybe-negative number",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+			/* r1 = [0x00, 0xff] */
+			BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			/* r1 = [-0x01, 0xfe] */
+			BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
+			/* r1 = 0 or 0xff'ffff'ffff'ffff */
+			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+			/* r1 = 0 or 0xffff'ffff'ffff */
+			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
+			/* computes unknown pointer, potentially OOB */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			/* potentially OOB access */
+			BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
+			/* exit */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "R0 unbounded memory access",
+		.result = REJECT
+	},
+	{
+		"bounds check map access with off+size signed 32bit overflow. test1",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+			BPF_JMP_A(0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "map_value pointer and 2147483646",
+		.result = REJECT
+	},
+	{
+		"bounds check map access with off+size signed 32bit overflow. test2",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+			BPF_JMP_A(0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "pointer offset 1073741822",
+		.result = REJECT
+	},
+	{
+		"bounds check map access with off+size signed 32bit overflow. test3",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+			BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+			BPF_JMP_A(0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "pointer offset -1073741822",
+		.result = REJECT
+	},
+	{
+		"bounds check map access with off+size signed 32bit overflow. test4",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(BPF_REG_1, 1000000),
+			BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
+			BPF_JMP_A(0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "map_value pointer and 1000000000000",
+		.result = REJECT
+	},
+	{
+		"pointer/scalar confusion in state equality check (way 1)",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+			BPF_JMP_A(1),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+			BPF_JMP_A(0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R0 leaks addr as return value"
+	},
+	{
+		"pointer/scalar confusion in state equality check (way 2)",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+			BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+			BPF_JMP_A(1),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.result = ACCEPT,
+		.result_unpriv = REJECT,
+		.errstr_unpriv = "R0 leaks addr as return value"
+	},
 	{
 		"variable-offset ctx access",
 		.insns = {
@@ -6782,6 +7247,71 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_LWT_IN,
 	},
+	{
+		"indirect variable-offset stack access",
+		.insns = {
+			/* Fill the top 8 bytes of the stack */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			/* Get an unknown value */
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+			/* Make it small and 4-byte aligned */
+			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
+			BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
+			/* add it to fp.  We now have either fp-4 or fp-8, but
+			 * we don't know which
+			 */
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
+			/* dereference it indirectly */
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 5 },
+		.errstr = "variable stack read R2",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_LWT_IN,
+	},
+	{
+		"direct stack access with 32-bit wraparound. test1",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
+			BPF_MOV32_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_EXIT_INSN()
+		},
+		.errstr = "fp pointer and 2147483647",
+		.result = REJECT
+	},
+	{
+		"direct stack access with 32-bit wraparound. test2",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
+			BPF_MOV32_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_EXIT_INSN()
+		},
+		.errstr = "fp pointer and 1073741823",
+		.result = REJECT
+	},
+	{
+		"direct stack access with 32-bit wraparound. test3",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
+			BPF_MOV32_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+			BPF_EXIT_INSN()
+		},
+		.errstr = "fp pointer offset 1073741822",
+		.result = REJECT
+	},
 	{
 		"liveness pruning and write screening",
 		.insns = {
@@ -7103,6 +7633,19 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	},
+	{
+		"pkt_end - pkt_start is allowed",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
 	{
 		"XDP pkt read, pkt_end mangling, bad access 1",
 		.insns = {
@@ -7118,7 +7661,7 @@ static struct bpf_test tests[] = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
-		.errstr = "R1 offset is outside of the packet",
+		.errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_XDP,
 	},
@@ -7137,7 +7680,7 @@ static struct bpf_test tests[] = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
-		.errstr = "R1 offset is outside of the packet",
+		.errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_XDP,
 	},
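
The verifier test entries added above all share the same anatomy: an .insns array of raw BPF instructions, an optional .fixup_map1/.fixup_map2 list of instruction indices to patch with a real map fd, the expected verdict in .result, and a substring of the expected verifier log in .errstr. A hypothetical entry in that style, for orientation only and not part of the patch (the log text is borrowed from the "pointer += pointer" entries above):

	{
		"sketch: map value pointer += pointer is rejected",
		.insns = {
			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
			BPF_LD_MAP_FD(BPF_REG_1, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_map_lookup_elem),
			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
			BPF_EXIT_INSN(),
		},
		.fixup_map1 = { 3 },	/* patch insn 3 with a real map fd */
		.errstr = "R0 pointer += pointer",
		.result = REJECT,
	},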
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index e57b4ac40e72e0502dff75ea1d80c543280428eb..7177bea1fdfa62a1aa4e424d4dab665d8a9b7aaf 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -1,3 +1,4 @@
 CONFIG_USER_NS=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_TEST_BPF=m
+CONFIG_NUMA=y
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
index 66e5ce5b91f008d5dffc7848d0e99d730ad589b5..1aef72df20a112a30c17f2e567ca8b972a550104 100644
--- a/tools/testing/selftests/x86/ldt_gdt.c
+++ b/tools/testing/selftests/x86/ldt_gdt.c
@@ -122,8 +122,7 @@ static void check_valid_segment(uint16_t index, int ldt,
 	 * NB: Different Linux versions do different things with the
 	 * accessed bit in set_thread_area().
 	 */
-	if (ar != expected_ar &&
-	    (ldt || ar != (expected_ar | AR_ACCESSED))) {
+	if (ar != expected_ar && ar != (expected_ar | AR_ACCESSED)) {
 		printf("[FAIL]\t%s entry %hu has AR 0x%08X but expected 0x%08X\n",
 		       (ldt ? "LDT" : "GDT"), index, ar, expected_ar);
 		nerrs++;
@@ -627,13 +626,10 @@ static void do_multicpu_tests(void)
 static int finish_exec_test(void)
 {
 	/*
-	 * In a sensible world, this would be check_invalid_segment(0, 1);
-	 * For better or for worse, though, the LDT is inherited across exec.
-	 * We can probably change this safely, but for now we test it.
+	 * Older kernel versions did inherit the LDT on exec(), which is
+	 * wrong because exec() starts from a clean state.
 	 */
-	check_valid_segment(0, 1,
-			    AR_DPL3 | AR_TYPE_XRCODE | AR_S | AR_P | AR_DB,
-			    42, true);
+	check_invalid_segment(0, 1);
 
 	return nerrs ? 1 : 0;
 }
diff --git a/tools/usb/usbip/src/utils.c b/tools/usb/usbip/src/utils.c
index 2b3d6d2350158b73a2597dc1a78ac007b6a3563c..3d7b42e7729941b1ca8f35fb7f6d10d4151a1a2f 100644
--- a/tools/usb/usbip/src/utils.c
+++ b/tools/usb/usbip/src/utils.c
@@ -30,6 +30,7 @@ int modify_match_busid(char *busid, int add)
 	char command[SYSFS_BUS_ID_SIZE + 4];
 	char match_busid_attr_path[SYSFS_PATH_MAX];
 	int rc;
+	int cmd_size;
 
 	snprintf(match_busid_attr_path, sizeof(match_busid_attr_path),
 		 "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME,
@@ -37,12 +38,14 @@ int modify_match_busid(char *busid, int add)
 		 attr_name);
 
 	if (add)
-		snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid);
+		cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s",
+				    busid);
 	else
-		snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid);
+		cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s",
+				    busid);
 
 	rc = write_sysfs_attribute(match_busid_attr_path, command,
-				   sizeof(command));
+				   cmd_size);
 	if (rc < 0) {
 		dbg("failed to write match_busid: %s", strerror(errno));
 		return -1;
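
Capturing the snprintf() return value matters because write_sysfs_attribute() used to be handed sizeof(command), i.e. the formatted string plus whatever uninitialised bytes followed it in the buffer. A sketch of the difference, reusing the names from the hunk and assuming busid fits in the buffer (which SYSFS_BUS_ID_SIZE guarantees for valid bus IDs); illustrative only:

	char command[SYSFS_BUS_ID_SIZE + 4];
	int cmd_size, rc;

	cmd_size = snprintf(command, sizeof(command), "add %s", busid);

	/* old: rc = write_sysfs_attribute(path, command, sizeof(command));
	 *      pushed the whole buffer, including the garbage after the NUL */
	rc = write_sysfs_attribute(match_busid_attr_path, command, cmd_size);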
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index f9555b1e7f158f5203c1aaba47002424d3279203..cc29a814832837f5fb237dfdf74845a284e04367 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -92,16 +92,23 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 {
 	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
 	struct arch_timer_context *vtimer;
+	u32 cnt_ctl;
 
-	if (!vcpu) {
-		pr_warn_once("Spurious arch timer IRQ on non-VCPU thread\n");
-		return IRQ_NONE;
-	}
-	vtimer = vcpu_vtimer(vcpu);
+	/*
+	 * We may see a timer interrupt after vcpu_put() has been called, which
+	 * sets the CPU's vcpu pointer to NULL, because even though the timer
+	 * has been disabled in vtimer_save_state(), the hardware interrupt
+	 * signal may not have been retired from the interrupt controller yet.
+	 */
+	if (!vcpu)
+		return IRQ_HANDLED;
 
+	vtimer = vcpu_vtimer(vcpu);
 	if (!vtimer->irq.level) {
-		vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
-		if (kvm_timer_irq_can_fire(vtimer))
+		cnt_ctl = read_sysreg_el0(cntv_ctl);
+		cnt_ctl &= ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT |
+			   ARCH_TIMER_CTRL_IT_MASK;
+		if (cnt_ctl == (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))
 			kvm_timer_update_irq(vcpu, true, vtimer);
 	}
 
@@ -355,6 +362,7 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
 
 	/* Disable the virtual timer */
 	write_sysreg_el0(0, cntv_ctl);
+	isb();
 
 	vtimer->loaded = false;
 out:
@@ -720,7 +728,7 @@ static int kvm_timer_dying_cpu(unsigned int cpu)
 	return 0;
 }
 
-int kvm_timer_hyp_init(void)
+int kvm_timer_hyp_init(bool has_gic)
 {
 	struct arch_timer_kvm_info *info;
 	int err;
@@ -756,10 +764,13 @@ int kvm_timer_hyp_init(void)
 		return err;
 	}
 
-	err = irq_set_vcpu_affinity(host_vtimer_irq, kvm_get_running_vcpus());
-	if (err) {
-		kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
-		goto out_free_irq;
+	if (has_gic) {
+		err = irq_set_vcpu_affinity(host_vtimer_irq,
+					    kvm_get_running_vcpus());
+		if (err) {
+			kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
+			goto out_free_irq;
+		}
 	}
 
 	kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
@@ -835,10 +846,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
 no_vgic:
 	preempt_disable();
 	timer->enabled = 1;
-	if (!irqchip_in_kernel(vcpu->kvm))
-		kvm_timer_vcpu_load_user(vcpu);
-	else
-		kvm_timer_vcpu_load_vgic(vcpu);
+	kvm_timer_vcpu_load(vcpu);
 	preempt_enable();
 
 	return 0;
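
The interrupt-handler hunk at the top of this file replaces the kvm_timer_irq_can_fire() check with a direct test of the freshly read CNTV_CTL: the virtual interrupt is only propagated when the timer is enabled, its status bit is set, and the interrupt is not masked. Expressed as a stand-alone predicate (a sketch using the same macros, not part of the patch):

	static inline bool vtimer_output_asserted(u32 cnt_ctl)
	{
		cnt_ctl &= ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT |
			   ARCH_TIMER_CTRL_IT_MASK;

		/* ENABLE and ISTATUS set, IMASK clear */
		return cnt_ctl == (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT);
	}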
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 6b60c98a6e2294c773eb20ea4794445a667415ea..2e43f9d42bd5db2a07438bb98f5e029c6246adb4 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -1326,7 +1326,7 @@ static int init_subsystems(void)
 	/*
 	 * Init HYP architected timer support
 	 */
-	err = kvm_timer_hyp_init();
+	err = kvm_timer_hyp_init(vgic_present);
 	if (err)
 		goto out;
 
diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
index b6e715fd3c90af8c74408b72652f9974a3fb894d..dac7ceb1a677746cadb086a2cf8a07d8e560373c 100644
--- a/virt/kvm/arm/mmio.c
+++ b/virt/kvm/arm/mmio.c
@@ -112,7 +112,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		}
 
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
-			       data);
+			       &data);
 		data = vcpu_data_host_to_guest(vcpu, data, len);
 		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
 	}
@@ -182,14 +182,14 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
 					       len);
 
-		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
+		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
 		kvm_mmio_write_buf(data_buf, len, data);
 
 		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
 				       data_buf);
 	} else {
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
-			       fault_ipa, 0);
+			       fault_ipa, NULL);
 
 		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
 				      data_buf);
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index b36945d49986dd5c0f097f16837d72d81f655308..b4b69c2d10120237e12bc6524243071bf645f1d9 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -509,8 +509,6 @@ static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
  */
 void free_hyp_pgds(void)
 {
-	unsigned long addr;
-
 	mutex_lock(&kvm_hyp_pgd_mutex);
 
 	if (boot_hyp_pgd) {
@@ -521,10 +519,10 @@ void free_hyp_pgds(void)
 
 	if (hyp_pgd) {
 		unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
-		for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
-			unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
-		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
-			unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
+		unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
+				(uintptr_t)high_memory - PAGE_OFFSET);
+		unmap_hyp_range(hyp_pgd, kern_hyp_va(VMALLOC_START),
+				VMALLOC_END - VMALLOC_START);
 
 		free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
 		hyp_pgd = NULL;