Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.19/0101-4.19.2-all-fixes.patch



Revision 3254
Tue Nov 27 10:32:50 2018 UTC by niro
File size: 523620 byte(s)
-linux-4.19.2
1 niro 3254 diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
2     index 48b424de85bb..cfbc18f0d9c9 100644
3     --- a/Documentation/filesystems/fscrypt.rst
4     +++ b/Documentation/filesystems/fscrypt.rst
5     @@ -191,21 +191,11 @@ Currently, the following pairs of encryption modes are supported:
6    
7     - AES-256-XTS for contents and AES-256-CTS-CBC for filenames
8     - AES-128-CBC for contents and AES-128-CTS-CBC for filenames
9     -- Speck128/256-XTS for contents and Speck128/256-CTS-CBC for filenames
10    
11     It is strongly recommended to use AES-256-XTS for contents encryption.
12     AES-128-CBC was added only for low-powered embedded devices with
13     crypto accelerators such as CAAM or CESA that do not support XTS.
14    
15     -Similarly, Speck128/256 support was only added for older or low-end
16     -CPUs which cannot do AES fast enough -- especially ARM CPUs which have
17     -NEON instructions but not the Cryptography Extensions -- and for which
18     -it would not otherwise be feasible to use encryption at all. It is
19     -not recommended to use Speck on CPUs that have AES instructions.
20     -Speck support is only available if it has been enabled in the crypto
21     -API via CONFIG_CRYPTO_SPECK. Also, on ARM platforms, to get
22     -acceptable performance CONFIG_CRYPTO_SPECK_NEON must be enabled.
23     -
24     New encryption modes can be added relatively easily, without changes
25     to individual filesystems. However, authenticated encryption (AE)
26     modes are not currently supported because of the difficulty of dealing
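
For context only (this is not part of the patch above): with Speck removed, the recommended mode pair for userspace remains AES-256-XTS for contents and AES-256-CTS-CBC for filenames. A minimal sketch of requesting that pair through the v1 policy ioctl is shown below; the struct layout and FS_* constants follow the <linux/fs.h> uapi header of this kernel series, and the master key descriptor value is a placeholder.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

/* Sketch: apply the recommended fscrypt mode pair to an empty directory. */
static int set_default_fscrypt_policy(int dirfd)
{
	struct fscrypt_policy policy;

	memset(&policy, 0, sizeof(policy));
	policy.version = 0;					/* v1 policy */
	policy.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
	policy.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
	policy.flags = FS_POLICY_FLAGS_PAD_32;			/* filename padding */
	/* placeholder 8-byte key descriptor for illustration */
	memcpy(policy.master_key_descriptor, "\x01\x23\x45\x67\x89\xab\xcd\xef", 8);

	return ioctl(dirfd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
}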
27     diff --git a/Documentation/media/uapi/cec/cec-ioc-receive.rst b/Documentation/media/uapi/cec/cec-ioc-receive.rst
28     index e964074cd15b..b25e48afaa08 100644
29     --- a/Documentation/media/uapi/cec/cec-ioc-receive.rst
30     +++ b/Documentation/media/uapi/cec/cec-ioc-receive.rst
31     @@ -16,10 +16,10 @@ CEC_RECEIVE, CEC_TRANSMIT - Receive or transmit a CEC message
32     Synopsis
33     ========
34    
35     -.. c:function:: int ioctl( int fd, CEC_RECEIVE, struct cec_msg *argp )
36     +.. c:function:: int ioctl( int fd, CEC_RECEIVE, struct cec_msg \*argp )
37     :name: CEC_RECEIVE
38    
39     -.. c:function:: int ioctl( int fd, CEC_TRANSMIT, struct cec_msg *argp )
40     +.. c:function:: int ioctl( int fd, CEC_TRANSMIT, struct cec_msg \*argp )
41     :name: CEC_TRANSMIT
42    
43     Arguments
44     @@ -272,6 +272,19 @@ View On' messages from initiator 0xf ('Unregistered') to destination 0 ('TV').
45     - The transmit failed after one or more retries. This status bit is
46     mutually exclusive with :ref:`CEC_TX_STATUS_OK <CEC-TX-STATUS-OK>`.
47     Other bits can still be set to explain which failures were seen.
48     + * .. _`CEC-TX-STATUS-ABORTED`:
49     +
50     + - ``CEC_TX_STATUS_ABORTED``
51     + - 0x40
52     + - The transmit was aborted due to an HDMI disconnect, or the adapter
53     + was unconfigured, or a transmit was interrupted, or the driver
54     + returned an error when attempting to start a transmit.
55     + * .. _`CEC-TX-STATUS-TIMEOUT`:
56     +
57     + - ``CEC_TX_STATUS_TIMEOUT``
58     + - 0x80
59     + - The transmit timed out. This should not normally happen and this
60     + indicates a driver problem.
61    
62    
63     .. tabularcolumns:: |p{5.6cm}|p{0.9cm}|p{11.0cm}|
64     @@ -300,6 +313,14 @@ View On' messages from initiator 0xf ('Unregistered') to destination 0 ('TV').
65     - The message was received successfully but the reply was
66     ``CEC_MSG_FEATURE_ABORT``. This status is only set if this message
67     was the reply to an earlier transmitted message.
68     + * .. _`CEC-RX-STATUS-ABORTED`:
69     +
70     + - ``CEC_RX_STATUS_ABORTED``
71     + - 0x08
72     + - The wait for a reply to an earlier transmitted message was aborted
73     + because the HDMI cable was disconnected, the adapter was unconfigured
74     + or the :ref:`CEC_TRANSMIT <CEC_RECEIVE>` that waited for a
75     + reply was interrupted.
76    
77    
78    
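For context only (not part of the patch): the two new transmit status bits documented above, CEC_TX_STATUS_ABORTED (0x40) and CEC_TX_STATUS_TIMEOUT (0x80), show up in the tx_status field of struct cec_msg after a CEC_TRANSMIT ioctl. The sketch below assumes the matching defines from this series are present in <linux/cec.h>; the device path and the logical addresses used for the poll message are assumptions for illustration.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/cec.h>

/* Sketch: transmit a poll message and report the newly documented bits. */
int main(void)
{
	struct cec_msg msg = { 0 };
	int fd = open("/dev/cec0", O_RDWR);	/* device path is an assumption */

	if (fd < 0)
		return 1;

	msg.len = 1;
	msg.msg[0] = (0x1 << 4) | 0x0;		/* poll from logical addr 1 to the TV */

	if (ioctl(fd, CEC_TRANSMIT, &msg) == 0) {
		if (msg.tx_status & CEC_TX_STATUS_ABORTED)
			printf("transmit aborted (0x40): disconnect/unconfigure/interrupt\n");
		if (msg.tx_status & CEC_TX_STATUS_TIMEOUT)
			printf("transmit timed out (0x80): indicates a driver problem\n");
	}
	return 0;
}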
79     diff --git a/Documentation/media/uapi/v4l/biblio.rst b/Documentation/media/uapi/v4l/biblio.rst
80     index 1cedcfc04327..386d6cf83e9c 100644
81     --- a/Documentation/media/uapi/v4l/biblio.rst
82     +++ b/Documentation/media/uapi/v4l/biblio.rst
83     @@ -226,16 +226,6 @@ xvYCC
84    
85     :author: International Electrotechnical Commission (http://www.iec.ch)
86    
87     -.. _adobergb:
88     -
89     -AdobeRGB
90     -========
91     -
92     -
93     -:title: Adobe© RGB (1998) Color Image Encoding Version 2005-05
94     -
95     -:author: Adobe Systems Incorporated (http://www.adobe.com)
96     -
97     .. _oprgb:
98    
99     opRGB
100     diff --git a/Documentation/media/uapi/v4l/colorspaces-defs.rst b/Documentation/media/uapi/v4l/colorspaces-defs.rst
101     index 410907fe9415..f24615544792 100644
102     --- a/Documentation/media/uapi/v4l/colorspaces-defs.rst
103     +++ b/Documentation/media/uapi/v4l/colorspaces-defs.rst
104     @@ -51,8 +51,8 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
105     - See :ref:`col-rec709`.
106     * - ``V4L2_COLORSPACE_SRGB``
107     - See :ref:`col-srgb`.
108     - * - ``V4L2_COLORSPACE_ADOBERGB``
109     - - See :ref:`col-adobergb`.
110     + * - ``V4L2_COLORSPACE_OPRGB``
111     + - See :ref:`col-oprgb`.
112     * - ``V4L2_COLORSPACE_BT2020``
113     - See :ref:`col-bt2020`.
114     * - ``V4L2_COLORSPACE_DCI_P3``
115     @@ -90,8 +90,8 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
116     - Use the Rec. 709 transfer function.
117     * - ``V4L2_XFER_FUNC_SRGB``
118     - Use the sRGB transfer function.
119     - * - ``V4L2_XFER_FUNC_ADOBERGB``
120     - - Use the AdobeRGB transfer function.
121     + * - ``V4L2_XFER_FUNC_OPRGB``
122     + - Use the opRGB transfer function.
123     * - ``V4L2_XFER_FUNC_SMPTE240M``
124     - Use the SMPTE 240M transfer function.
125     * - ``V4L2_XFER_FUNC_NONE``
126     diff --git a/Documentation/media/uapi/v4l/colorspaces-details.rst b/Documentation/media/uapi/v4l/colorspaces-details.rst
127     index b5d551b9cc8f..09fabf4cd412 100644
128     --- a/Documentation/media/uapi/v4l/colorspaces-details.rst
129     +++ b/Documentation/media/uapi/v4l/colorspaces-details.rst
130     @@ -290,15 +290,14 @@ Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
131     170M/BT.601. The Y'CbCr quantization is limited range.
132    
133    
134     -.. _col-adobergb:
135     +.. _col-oprgb:
136    
137     -Colorspace Adobe RGB (V4L2_COLORSPACE_ADOBERGB)
138     +Colorspace opRGB (V4L2_COLORSPACE_OPRGB)
139     ===============================================
140    
141     -The :ref:`adobergb` standard defines the colorspace used by computer
142     -graphics that use the AdobeRGB colorspace. This is also known as the
143     -:ref:`oprgb` standard. The default transfer function is
144     -``V4L2_XFER_FUNC_ADOBERGB``. The default Y'CbCr encoding is
145     +The :ref:`oprgb` standard defines the colorspace used by computer
146     +graphics that use the opRGB colorspace. The default transfer function is
147     +``V4L2_XFER_FUNC_OPRGB``. The default Y'CbCr encoding is
148     ``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is limited
149     range.
150    
151     @@ -312,7 +311,7 @@ The chromaticities of the primary colors and the white reference are:
152    
153     .. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
154    
155     -.. flat-table:: Adobe RGB Chromaticities
156     +.. flat-table:: opRGB Chromaticities
157     :header-rows: 1
158     :stub-columns: 0
159     :widths: 1 1 2
160     diff --git a/Documentation/media/videodev2.h.rst.exceptions b/Documentation/media/videodev2.h.rst.exceptions
161     index ca9f0edc579e..e420a39f1ebf 100644
162     --- a/Documentation/media/videodev2.h.rst.exceptions
163     +++ b/Documentation/media/videodev2.h.rst.exceptions
164     @@ -56,7 +56,8 @@ replace symbol V4L2_MEMORY_USERPTR :c:type:`v4l2_memory`
165     # Documented enum v4l2_colorspace
166     replace symbol V4L2_COLORSPACE_470_SYSTEM_BG :c:type:`v4l2_colorspace`
167     replace symbol V4L2_COLORSPACE_470_SYSTEM_M :c:type:`v4l2_colorspace`
168     -replace symbol V4L2_COLORSPACE_ADOBERGB :c:type:`v4l2_colorspace`
169     +replace symbol V4L2_COLORSPACE_OPRGB :c:type:`v4l2_colorspace`
170     +replace define V4L2_COLORSPACE_ADOBERGB :c:type:`v4l2_colorspace`
171     replace symbol V4L2_COLORSPACE_BT2020 :c:type:`v4l2_colorspace`
172     replace symbol V4L2_COLORSPACE_DCI_P3 :c:type:`v4l2_colorspace`
173     replace symbol V4L2_COLORSPACE_DEFAULT :c:type:`v4l2_colorspace`
174     @@ -69,7 +70,8 @@ replace symbol V4L2_COLORSPACE_SRGB :c:type:`v4l2_colorspace`
175    
176     # Documented enum v4l2_xfer_func
177     replace symbol V4L2_XFER_FUNC_709 :c:type:`v4l2_xfer_func`
178     -replace symbol V4L2_XFER_FUNC_ADOBERGB :c:type:`v4l2_xfer_func`
179     +replace symbol V4L2_XFER_FUNC_OPRGB :c:type:`v4l2_xfer_func`
180     +replace define V4L2_XFER_FUNC_ADOBERGB :c:type:`v4l2_xfer_func`
181     replace symbol V4L2_XFER_FUNC_DCI_P3 :c:type:`v4l2_xfer_func`
182     replace symbol V4L2_XFER_FUNC_DEFAULT :c:type:`v4l2_xfer_func`
183     replace symbol V4L2_XFER_FUNC_NONE :c:type:`v4l2_xfer_func`
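For context only (not part of the patch): the "replace define V4L2_COLORSPACE_ADOBERGB" / "replace define V4L2_XFER_FUNC_ADOBERGB" lines suggest the old AdobeRGB spellings are kept as plain defines aliasing the new OPRGB enum values, so existing userspace keeps compiling. A short hedged sketch of handling the renamed colorspace, under that assumption, follows.

#include <linux/videodev2.h>

/*
 * Sketch (assumption: V4L2_COLORSPACE_ADOBERGB now aliases
 * V4L2_COLORSPACE_OPRGB, so both spellings compare equal).
 * Pick the opRGB transfer function for an opRGB format.
 */
static enum v4l2_xfer_func pick_xfer_func(const struct v4l2_pix_format *fmt)
{
	if (fmt->colorspace == V4L2_COLORSPACE_OPRGB)	/* == V4L2_COLORSPACE_ADOBERGB */
		return V4L2_XFER_FUNC_OPRGB;
	return V4L2_XFER_FUNC_DEFAULT;
}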
184     diff --git a/Makefile b/Makefile
185     index abcd8ca4966f..c8fe567f18ab 100644
186     --- a/Makefile
187     +++ b/Makefile
188     @@ -1,7 +1,7 @@
189     # SPDX-License-Identifier: GPL-2.0
190     VERSION = 4
191     PATCHLEVEL = 19
192     -SUBLEVEL = 1
193     +SUBLEVEL = 2
194     EXTRAVERSION =
195     NAME = "People's Front"
196    
197     diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
198     index a0ddf497e8cd..2cb45ddd2ae3 100644
199     --- a/arch/arm/boot/dts/dra7.dtsi
200     +++ b/arch/arm/boot/dts/dra7.dtsi
201     @@ -354,7 +354,7 @@
202     ti,hwmods = "pcie1";
203     phys = <&pcie1_phy>;
204     phy-names = "pcie-phy0";
205     - ti,syscon-unaligned-access = <&scm_conf1 0x14 2>;
206     + ti,syscon-unaligned-access = <&scm_conf1 0x14 1>;
207     status = "disabled";
208     };
209     };
210     diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
211     index 2ab99f9f3d0a..dd9ec05eb0f7 100644
212     --- a/arch/arm/boot/dts/exynos4210-origen.dts
213     +++ b/arch/arm/boot/dts/exynos4210-origen.dts
214     @@ -151,6 +151,8 @@
215     reg = <0x66>;
216     interrupt-parent = <&gpx0>;
217     interrupts = <4 IRQ_TYPE_NONE>, <3 IRQ_TYPE_NONE>;
218     + pinctrl-names = "default";
219     + pinctrl-0 = <&max8997_irq>;
220    
221     max8997,pmic-buck1-dvs-voltage = <1350000>;
222     max8997,pmic-buck2-dvs-voltage = <1100000>;
223     @@ -288,6 +290,13 @@
224     };
225     };
226    
227     +&pinctrl_1 {
228     + max8997_irq: max8997-irq {
229     + samsung,pins = "gpx0-3", "gpx0-4";
230     + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
231     + };
232     +};
233     +
234     &sdhci_0 {
235     bus-width = <4>;
236     pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus4 &sd0_cd>;
237     diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
238     index da163a40af15..b85527faa6ea 100644
239     --- a/arch/arm/boot/dts/exynos5250.dtsi
240     +++ b/arch/arm/boot/dts/exynos5250.dtsi
241     @@ -54,62 +54,109 @@
242     device_type = "cpu";
243     compatible = "arm,cortex-a15";
244     reg = <0>;
245     - clock-frequency = <1700000000>;
246     clocks = <&clock CLK_ARM_CLK>;
247     clock-names = "cpu";
248     - clock-latency = <140000>;
249     -
250     - operating-points = <
251     - 1700000 1300000
252     - 1600000 1250000
253     - 1500000 1225000
254     - 1400000 1200000
255     - 1300000 1150000
256     - 1200000 1125000
257     - 1100000 1100000
258     - 1000000 1075000
259     - 900000 1050000
260     - 800000 1025000
261     - 700000 1012500
262     - 600000 1000000
263     - 500000 975000
264     - 400000 950000
265     - 300000 937500
266     - 200000 925000
267     - >;
268     + operating-points-v2 = <&cpu0_opp_table>;
269     #cooling-cells = <2>; /* min followed by max */
270     };
271     cpu@1 {
272     device_type = "cpu";
273     compatible = "arm,cortex-a15";
274     reg = <1>;
275     - clock-frequency = <1700000000>;
276     clocks = <&clock CLK_ARM_CLK>;
277     clock-names = "cpu";
278     - clock-latency = <140000>;
279     -
280     - operating-points = <
281     - 1700000 1300000
282     - 1600000 1250000
283     - 1500000 1225000
284     - 1400000 1200000
285     - 1300000 1150000
286     - 1200000 1125000
287     - 1100000 1100000
288     - 1000000 1075000
289     - 900000 1050000
290     - 800000 1025000
291     - 700000 1012500
292     - 600000 1000000
293     - 500000 975000
294     - 400000 950000
295     - 300000 937500
296     - 200000 925000
297     - >;
298     + operating-points-v2 = <&cpu0_opp_table>;
299     #cooling-cells = <2>; /* min followed by max */
300     };
301     };
302    
303     + cpu0_opp_table: opp_table0 {
304     + compatible = "operating-points-v2";
305     + opp-shared;
306     +
307     + opp-200000000 {
308     + opp-hz = /bits/ 64 <200000000>;
309     + opp-microvolt = <925000>;
310     + clock-latency-ns = <140000>;
311     + };
312     + opp-300000000 {
313     + opp-hz = /bits/ 64 <300000000>;
314     + opp-microvolt = <937500>;
315     + clock-latency-ns = <140000>;
316     + };
317     + opp-400000000 {
318     + opp-hz = /bits/ 64 <400000000>;
319     + opp-microvolt = <950000>;
320     + clock-latency-ns = <140000>;
321     + };
322     + opp-500000000 {
323     + opp-hz = /bits/ 64 <500000000>;
324     + opp-microvolt = <975000>;
325     + clock-latency-ns = <140000>;
326     + };
327     + opp-600000000 {
328     + opp-hz = /bits/ 64 <600000000>;
329     + opp-microvolt = <1000000>;
330     + clock-latency-ns = <140000>;
331     + };
332     + opp-700000000 {
333     + opp-hz = /bits/ 64 <700000000>;
334     + opp-microvolt = <1012500>;
335     + clock-latency-ns = <140000>;
336     + };
337     + opp-800000000 {
338     + opp-hz = /bits/ 64 <800000000>;
339     + opp-microvolt = <1025000>;
340     + clock-latency-ns = <140000>;
341     + };
342     + opp-900000000 {
343     + opp-hz = /bits/ 64 <900000000>;
344     + opp-microvolt = <1050000>;
345     + clock-latency-ns = <140000>;
346     + };
347     + opp-1000000000 {
348     + opp-hz = /bits/ 64 <1000000000>;
349     + opp-microvolt = <1075000>;
350     + clock-latency-ns = <140000>;
351     + opp-suspend;
352     + };
353     + opp-1100000000 {
354     + opp-hz = /bits/ 64 <1100000000>;
355     + opp-microvolt = <1100000>;
356     + clock-latency-ns = <140000>;
357     + };
358     + opp-1200000000 {
359     + opp-hz = /bits/ 64 <1200000000>;
360     + opp-microvolt = <1125000>;
361     + clock-latency-ns = <140000>;
362     + };
363     + opp-1300000000 {
364     + opp-hz = /bits/ 64 <1300000000>;
365     + opp-microvolt = <1150000>;
366     + clock-latency-ns = <140000>;
367     + };
368     + opp-1400000000 {
369     + opp-hz = /bits/ 64 <1400000000>;
370     + opp-microvolt = <1200000>;
371     + clock-latency-ns = <140000>;
372     + };
373     + opp-1500000000 {
374     + opp-hz = /bits/ 64 <1500000000>;
375     + opp-microvolt = <1225000>;
376     + clock-latency-ns = <140000>;
377     + };
378     + opp-1600000000 {
379     + opp-hz = /bits/ 64 <1600000000>;
380     + opp-microvolt = <1250000>;
381     + clock-latency-ns = <140000>;
382     + };
383     + opp-1700000000 {
384     + opp-hz = /bits/ 64 <1700000000>;
385     + opp-microvolt = <1300000>;
386     + clock-latency-ns = <140000>;
387     + };
388     + };
389     +
390     soc: soc {
391     sysram@2020000 {
392     compatible = "mmio-sram";
393     diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
394     index a4dcb68f4322..b4dd3846e8cc 100644
395     --- a/arch/arm/boot/dts/socfpga_arria10.dtsi
396     +++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
397     @@ -613,7 +613,7 @@
398     status = "disabled";
399     };
400    
401     - sdr: sdr@ffc25000 {
402     + sdr: sdr@ffcfb100 {
403     compatible = "altr,sdr-ctl", "syscon";
404     reg = <0xffcfb100 0x80>;
405     };
406     diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
407     index 925d1364727a..b8e69fe282b8 100644
408     --- a/arch/arm/crypto/Kconfig
409     +++ b/arch/arm/crypto/Kconfig
410     @@ -121,10 +121,4 @@ config CRYPTO_CHACHA20_NEON
411     select CRYPTO_BLKCIPHER
412     select CRYPTO_CHACHA20
413    
414     -config CRYPTO_SPECK_NEON
415     - tristate "NEON accelerated Speck cipher algorithms"
416     - depends on KERNEL_MODE_NEON
417     - select CRYPTO_BLKCIPHER
418     - select CRYPTO_SPECK
419     -
420     endif
421     diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
422     index 8de542c48ade..bd5bceef0605 100644
423     --- a/arch/arm/crypto/Makefile
424     +++ b/arch/arm/crypto/Makefile
425     @@ -10,7 +10,6 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
426     obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
427     obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
428     obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o
429     -obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
430    
431     ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
432     ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
433     @@ -54,7 +53,6 @@ ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
434     crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
435     crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o
436     chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
437     -speck-neon-y := speck-neon-core.o speck-neon-glue.o
438    
439     ifdef REGENERATE_ARM_CRYPTO
440     quiet_cmd_perl = PERL $@
441     diff --git a/arch/arm/crypto/speck-neon-core.S b/arch/arm/crypto/speck-neon-core.S
442     deleted file mode 100644
443     index 57caa742016e..000000000000
444     --- a/arch/arm/crypto/speck-neon-core.S
445     +++ /dev/null
446     @@ -1,434 +0,0 @@
447     -// SPDX-License-Identifier: GPL-2.0
448     -/*
449     - * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
450     - *
451     - * Copyright (c) 2018 Google, Inc
452     - *
453     - * Author: Eric Biggers <ebiggers@google.com>
454     - */
455     -
456     -#include <linux/linkage.h>
457     -
458     - .text
459     - .fpu neon
460     -
461     - // arguments
462     - ROUND_KEYS .req r0 // const {u64,u32} *round_keys
463     - NROUNDS .req r1 // int nrounds
464     - DST .req r2 // void *dst
465     - SRC .req r3 // const void *src
466     - NBYTES .req r4 // unsigned int nbytes
467     - TWEAK .req r5 // void *tweak
468     -
469     - // registers which hold the data being encrypted/decrypted
470     - X0 .req q0
471     - X0_L .req d0
472     - X0_H .req d1
473     - Y0 .req q1
474     - Y0_H .req d3
475     - X1 .req q2
476     - X1_L .req d4
477     - X1_H .req d5
478     - Y1 .req q3
479     - Y1_H .req d7
480     - X2 .req q4
481     - X2_L .req d8
482     - X2_H .req d9
483     - Y2 .req q5
484     - Y2_H .req d11
485     - X3 .req q6
486     - X3_L .req d12
487     - X3_H .req d13
488     - Y3 .req q7
489     - Y3_H .req d15
490     -
491     - // the round key, duplicated in all lanes
492     - ROUND_KEY .req q8
493     - ROUND_KEY_L .req d16
494     - ROUND_KEY_H .req d17
495     -
496     - // index vector for vtbl-based 8-bit rotates
497     - ROTATE_TABLE .req d18
498     -
499     - // multiplication table for updating XTS tweaks
500     - GF128MUL_TABLE .req d19
501     - GF64MUL_TABLE .req d19
502     -
503     - // current XTS tweak value(s)
504     - TWEAKV .req q10
505     - TWEAKV_L .req d20
506     - TWEAKV_H .req d21
507     -
508     - TMP0 .req q12
509     - TMP0_L .req d24
510     - TMP0_H .req d25
511     - TMP1 .req q13
512     - TMP2 .req q14
513     - TMP3 .req q15
514     -
515     - .align 4
516     -.Lror64_8_table:
517     - .byte 1, 2, 3, 4, 5, 6, 7, 0
518     -.Lror32_8_table:
519     - .byte 1, 2, 3, 0, 5, 6, 7, 4
520     -.Lrol64_8_table:
521     - .byte 7, 0, 1, 2, 3, 4, 5, 6
522     -.Lrol32_8_table:
523     - .byte 3, 0, 1, 2, 7, 4, 5, 6
524     -.Lgf128mul_table:
525     - .byte 0, 0x87
526     - .fill 14
527     -.Lgf64mul_table:
528     - .byte 0, 0x1b, (0x1b << 1), (0x1b << 1) ^ 0x1b
529     - .fill 12
530     -
531     -/*
532     - * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
533     - *
534     - * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
535     - * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
536     - * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
537     - *
538     - * The 8-bit rotates are implemented using vtbl instead of vshr + vsli because
539     - * the vtbl approach is faster on some processors and the same speed on others.
540     - */
541     -.macro _speck_round_128bytes n
542     -
543     - // x = ror(x, 8)
544     - vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
545     - vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
546     - vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
547     - vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
548     - vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
549     - vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
550     - vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
551     - vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
552     -
553     - // x += y
554     - vadd.u\n X0, Y0
555     - vadd.u\n X1, Y1
556     - vadd.u\n X2, Y2
557     - vadd.u\n X3, Y3
558     -
559     - // x ^= k
560     - veor X0, ROUND_KEY
561     - veor X1, ROUND_KEY
562     - veor X2, ROUND_KEY
563     - veor X3, ROUND_KEY
564     -
565     - // y = rol(y, 3)
566     - vshl.u\n TMP0, Y0, #3
567     - vshl.u\n TMP1, Y1, #3
568     - vshl.u\n TMP2, Y2, #3
569     - vshl.u\n TMP3, Y3, #3
570     - vsri.u\n TMP0, Y0, #(\n - 3)
571     - vsri.u\n TMP1, Y1, #(\n - 3)
572     - vsri.u\n TMP2, Y2, #(\n - 3)
573     - vsri.u\n TMP3, Y3, #(\n - 3)
574     -
575     - // y ^= x
576     - veor Y0, TMP0, X0
577     - veor Y1, TMP1, X1
578     - veor Y2, TMP2, X2
579     - veor Y3, TMP3, X3
580     -.endm
581     -
582     -/*
583     - * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
584     - *
585     - * This is the inverse of _speck_round_128bytes().
586     - */
587     -.macro _speck_unround_128bytes n
588     -
589     - // y ^= x
590     - veor TMP0, Y0, X0
591     - veor TMP1, Y1, X1
592     - veor TMP2, Y2, X2
593     - veor TMP3, Y3, X3
594     -
595     - // y = ror(y, 3)
596     - vshr.u\n Y0, TMP0, #3
597     - vshr.u\n Y1, TMP1, #3
598     - vshr.u\n Y2, TMP2, #3
599     - vshr.u\n Y3, TMP3, #3
600     - vsli.u\n Y0, TMP0, #(\n - 3)
601     - vsli.u\n Y1, TMP1, #(\n - 3)
602     - vsli.u\n Y2, TMP2, #(\n - 3)
603     - vsli.u\n Y3, TMP3, #(\n - 3)
604     -
605     - // x ^= k
606     - veor X0, ROUND_KEY
607     - veor X1, ROUND_KEY
608     - veor X2, ROUND_KEY
609     - veor X3, ROUND_KEY
610     -
611     - // x -= y
612     - vsub.u\n X0, Y0
613     - vsub.u\n X1, Y1
614     - vsub.u\n X2, Y2
615     - vsub.u\n X3, Y3
616     -
617     - // x = rol(x, 8);
618     - vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
619     - vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
620     - vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
621     - vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
622     - vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
623     - vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
624     - vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
625     - vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
626     -.endm
627     -
628     -.macro _xts128_precrypt_one dst_reg, tweak_buf, tmp
629     -
630     - // Load the next source block
631     - vld1.8 {\dst_reg}, [SRC]!
632     -
633     - // Save the current tweak in the tweak buffer
634     - vst1.8 {TWEAKV}, [\tweak_buf:128]!
635     -
636     - // XOR the next source block with the current tweak
637     - veor \dst_reg, TWEAKV
638     -
639     - /*
640     - * Calculate the next tweak by multiplying the current one by x,
641     - * modulo p(x) = x^128 + x^7 + x^2 + x + 1.
642     - */
643     - vshr.u64 \tmp, TWEAKV, #63
644     - vshl.u64 TWEAKV, #1
645     - veor TWEAKV_H, \tmp\()_L
646     - vtbl.8 \tmp\()_H, {GF128MUL_TABLE}, \tmp\()_H
647     - veor TWEAKV_L, \tmp\()_H
648     -.endm
649     -
650     -.macro _xts64_precrypt_two dst_reg, tweak_buf, tmp
651     -
652     - // Load the next two source blocks
653     - vld1.8 {\dst_reg}, [SRC]!
654     -
655     - // Save the current two tweaks in the tweak buffer
656     - vst1.8 {TWEAKV}, [\tweak_buf:128]!
657     -
658     - // XOR the next two source blocks with the current two tweaks
659     - veor \dst_reg, TWEAKV
660     -
661     - /*
662     - * Calculate the next two tweaks by multiplying the current ones by x^2,
663     - * modulo p(x) = x^64 + x^4 + x^3 + x + 1.
664     - */
665     - vshr.u64 \tmp, TWEAKV, #62
666     - vshl.u64 TWEAKV, #2
667     - vtbl.8 \tmp\()_L, {GF64MUL_TABLE}, \tmp\()_L
668     - vtbl.8 \tmp\()_H, {GF64MUL_TABLE}, \tmp\()_H
669     - veor TWEAKV, \tmp
670     -.endm
671     -
672     -/*
673     - * _speck_xts_crypt() - Speck-XTS encryption/decryption
674     - *
675     - * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
676     - * using Speck-XTS, specifically the variant with a block size of '2n' and round
677     - * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
678     - * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
679     - * nonzero multiple of 128.
680     - */
681     -.macro _speck_xts_crypt n, decrypting
682     - push {r4-r7}
683     - mov r7, sp
684     -
685     - /*
686     - * The first four parameters were passed in registers r0-r3. Load the
687     - * additional parameters, which were passed on the stack.
688     - */
689     - ldr NBYTES, [sp, #16]
690     - ldr TWEAK, [sp, #20]
691     -
692     - /*
693     - * If decrypting, modify the ROUND_KEYS parameter to point to the last
694     - * round key rather than the first, since for decryption the round keys
695     - * are used in reverse order.
696     - */
697     -.if \decrypting
698     -.if \n == 64
699     - add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #3
700     - sub ROUND_KEYS, #8
701     -.else
702     - add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #2
703     - sub ROUND_KEYS, #4
704     -.endif
705     -.endif
706     -
707     - // Load the index vector for vtbl-based 8-bit rotates
708     -.if \decrypting
709     - ldr r12, =.Lrol\n\()_8_table
710     -.else
711     - ldr r12, =.Lror\n\()_8_table
712     -.endif
713     - vld1.8 {ROTATE_TABLE}, [r12:64]
714     -
715     - // One-time XTS preparation
716     -
717     - /*
718     - * Allocate stack space to store 128 bytes worth of tweaks. For
719     - * performance, this space is aligned to a 16-byte boundary so that we
720     - * can use the load/store instructions that declare 16-byte alignment.
721     - * For Thumb2 compatibility, don't do the 'bic' directly on 'sp'.
722     - */
723     - sub r12, sp, #128
724     - bic r12, #0xf
725     - mov sp, r12
726     -
727     -.if \n == 64
728     - // Load first tweak
729     - vld1.8 {TWEAKV}, [TWEAK]
730     -
731     - // Load GF(2^128) multiplication table
732     - ldr r12, =.Lgf128mul_table
733     - vld1.8 {GF128MUL_TABLE}, [r12:64]
734     -.else
735     - // Load first tweak
736     - vld1.8 {TWEAKV_L}, [TWEAK]
737     -
738     - // Load GF(2^64) multiplication table
739     - ldr r12, =.Lgf64mul_table
740     - vld1.8 {GF64MUL_TABLE}, [r12:64]
741     -
742     - // Calculate second tweak, packing it together with the first
743     - vshr.u64 TMP0_L, TWEAKV_L, #63
744     - vtbl.u8 TMP0_L, {GF64MUL_TABLE}, TMP0_L
745     - vshl.u64 TWEAKV_H, TWEAKV_L, #1
746     - veor TWEAKV_H, TMP0_L
747     -.endif
748     -
749     -.Lnext_128bytes_\@:
750     -
751     - /*
752     - * Load the source blocks into {X,Y}[0-3], XOR them with their XTS tweak
753     - * values, and save the tweaks on the stack for later. Then
754     - * de-interleave the 'x' and 'y' elements of each block, i.e. make it so
755     - * that the X[0-3] registers contain only the second halves of blocks,
756     - * and the Y[0-3] registers contain only the first halves of blocks.
757     - * (Speck uses the order (y, x) rather than the more intuitive (x, y).)
758     - */
759     - mov r12, sp
760     -.if \n == 64
761     - _xts128_precrypt_one X0, r12, TMP0
762     - _xts128_precrypt_one Y0, r12, TMP0
763     - _xts128_precrypt_one X1, r12, TMP0
764     - _xts128_precrypt_one Y1, r12, TMP0
765     - _xts128_precrypt_one X2, r12, TMP0
766     - _xts128_precrypt_one Y2, r12, TMP0
767     - _xts128_precrypt_one X3, r12, TMP0
768     - _xts128_precrypt_one Y3, r12, TMP0
769     - vswp X0_L, Y0_H
770     - vswp X1_L, Y1_H
771     - vswp X2_L, Y2_H
772     - vswp X3_L, Y3_H
773     -.else
774     - _xts64_precrypt_two X0, r12, TMP0
775     - _xts64_precrypt_two Y0, r12, TMP0
776     - _xts64_precrypt_two X1, r12, TMP0
777     - _xts64_precrypt_two Y1, r12, TMP0
778     - _xts64_precrypt_two X2, r12, TMP0
779     - _xts64_precrypt_two Y2, r12, TMP0
780     - _xts64_precrypt_two X3, r12, TMP0
781     - _xts64_precrypt_two Y3, r12, TMP0
782     - vuzp.32 Y0, X0
783     - vuzp.32 Y1, X1
784     - vuzp.32 Y2, X2
785     - vuzp.32 Y3, X3
786     -.endif
787     -
788     - // Do the cipher rounds
789     -
790     - mov r12, ROUND_KEYS
791     - mov r6, NROUNDS
792     -
793     -.Lnext_round_\@:
794     -.if \decrypting
795     -.if \n == 64
796     - vld1.64 ROUND_KEY_L, [r12]
797     - sub r12, #8
798     - vmov ROUND_KEY_H, ROUND_KEY_L
799     -.else
800     - vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]
801     - sub r12, #4
802     -.endif
803     - _speck_unround_128bytes \n
804     -.else
805     -.if \n == 64
806     - vld1.64 ROUND_KEY_L, [r12]!
807     - vmov ROUND_KEY_H, ROUND_KEY_L
808     -.else
809     - vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]!
810     -.endif
811     - _speck_round_128bytes \n
812     -.endif
813     - subs r6, r6, #1
814     - bne .Lnext_round_\@
815     -
816     - // Re-interleave the 'x' and 'y' elements of each block
817     -.if \n == 64
818     - vswp X0_L, Y0_H
819     - vswp X1_L, Y1_H
820     - vswp X2_L, Y2_H
821     - vswp X3_L, Y3_H
822     -.else
823     - vzip.32 Y0, X0
824     - vzip.32 Y1, X1
825     - vzip.32 Y2, X2
826     - vzip.32 Y3, X3
827     -.endif
828     -
829     - // XOR the encrypted/decrypted blocks with the tweaks we saved earlier
830     - mov r12, sp
831     - vld1.8 {TMP0, TMP1}, [r12:128]!
832     - vld1.8 {TMP2, TMP3}, [r12:128]!
833     - veor X0, TMP0
834     - veor Y0, TMP1
835     - veor X1, TMP2
836     - veor Y1, TMP3
837     - vld1.8 {TMP0, TMP1}, [r12:128]!
838     - vld1.8 {TMP2, TMP3}, [r12:128]!
839     - veor X2, TMP0
840     - veor Y2, TMP1
841     - veor X3, TMP2
842     - veor Y3, TMP3
843     -
844     - // Store the ciphertext in the destination buffer
845     - vst1.8 {X0, Y0}, [DST]!
846     - vst1.8 {X1, Y1}, [DST]!
847     - vst1.8 {X2, Y2}, [DST]!
848     - vst1.8 {X3, Y3}, [DST]!
849     -
850     - // Continue if there are more 128-byte chunks remaining, else return
851     - subs NBYTES, #128
852     - bne .Lnext_128bytes_\@
853     -
854     - // Store the next tweak
855     -.if \n == 64
856     - vst1.8 {TWEAKV}, [TWEAK]
857     -.else
858     - vst1.8 {TWEAKV_L}, [TWEAK]
859     -.endif
860     -
861     - mov sp, r7
862     - pop {r4-r7}
863     - bx lr
864     -.endm
865     -
866     -ENTRY(speck128_xts_encrypt_neon)
867     - _speck_xts_crypt n=64, decrypting=0
868     -ENDPROC(speck128_xts_encrypt_neon)
869     -
870     -ENTRY(speck128_xts_decrypt_neon)
871     - _speck_xts_crypt n=64, decrypting=1
872     -ENDPROC(speck128_xts_decrypt_neon)
873     -
874     -ENTRY(speck64_xts_encrypt_neon)
875     - _speck_xts_crypt n=32, decrypting=0
876     -ENDPROC(speck64_xts_encrypt_neon)
877     -
878     -ENTRY(speck64_xts_decrypt_neon)
879     - _speck_xts_crypt n=32, decrypting=1
880     -ENDPROC(speck64_xts_decrypt_neon)
881     diff --git a/arch/arm/crypto/speck-neon-glue.c b/arch/arm/crypto/speck-neon-glue.c
882     deleted file mode 100644
883     index f012c3ea998f..000000000000
884     --- a/arch/arm/crypto/speck-neon-glue.c
885     +++ /dev/null
886     @@ -1,288 +0,0 @@
887     -// SPDX-License-Identifier: GPL-2.0
888     -/*
889     - * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
890     - *
891     - * Copyright (c) 2018 Google, Inc
892     - *
893     - * Note: the NIST recommendation for XTS only specifies a 128-bit block size,
894     - * but a 64-bit version (needed for Speck64) is fairly straightforward; the math
895     - * is just done in GF(2^64) instead of GF(2^128), with the reducing polynomial
896     - * x^64 + x^4 + x^3 + x + 1 from the original XEX paper (Rogaway, 2004:
897     - * "Efficient Instantiations of Tweakable Blockciphers and Refinements to Modes
898     - * OCB and PMAC"), represented as 0x1B.
899     - */
900     -
901     -#include <asm/hwcap.h>
902     -#include <asm/neon.h>
903     -#include <asm/simd.h>
904     -#include <crypto/algapi.h>
905     -#include <crypto/gf128mul.h>
906     -#include <crypto/internal/skcipher.h>
907     -#include <crypto/speck.h>
908     -#include <crypto/xts.h>
909     -#include <linux/kernel.h>
910     -#include <linux/module.h>
911     -
912     -/* The assembly functions only handle multiples of 128 bytes */
913     -#define SPECK_NEON_CHUNK_SIZE 128
914     -
915     -/* Speck128 */
916     -
917     -struct speck128_xts_tfm_ctx {
918     - struct speck128_tfm_ctx main_key;
919     - struct speck128_tfm_ctx tweak_key;
920     -};
921     -
922     -asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
923     - void *dst, const void *src,
924     - unsigned int nbytes, void *tweak);
925     -
926     -asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
927     - void *dst, const void *src,
928     - unsigned int nbytes, void *tweak);
929     -
930     -typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
931     - u8 *, const u8 *);
932     -typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
933     - const void *, unsigned int, void *);
934     -
935     -static __always_inline int
936     -__speck128_xts_crypt(struct skcipher_request *req,
937     - speck128_crypt_one_t crypt_one,
938     - speck128_xts_crypt_many_t crypt_many)
939     -{
940     - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
941     - const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
942     - struct skcipher_walk walk;
943     - le128 tweak;
944     - int err;
945     -
946     - err = skcipher_walk_virt(&walk, req, true);
947     -
948     - crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
949     -
950     - while (walk.nbytes > 0) {
951     - unsigned int nbytes = walk.nbytes;
952     - u8 *dst = walk.dst.virt.addr;
953     - const u8 *src = walk.src.virt.addr;
954     -
955     - if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
956     - unsigned int count;
957     -
958     - count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
959     - kernel_neon_begin();
960     - (*crypt_many)(ctx->main_key.round_keys,
961     - ctx->main_key.nrounds,
962     - dst, src, count, &tweak);
963     - kernel_neon_end();
964     - dst += count;
965     - src += count;
966     - nbytes -= count;
967     - }
968     -
969     - /* Handle any remainder with generic code */
970     - while (nbytes >= sizeof(tweak)) {
971     - le128_xor((le128 *)dst, (const le128 *)src, &tweak);
972     - (*crypt_one)(&ctx->main_key, dst, dst);
973     - le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
974     - gf128mul_x_ble(&tweak, &tweak);
975     -
976     - dst += sizeof(tweak);
977     - src += sizeof(tweak);
978     - nbytes -= sizeof(tweak);
979     - }
980     - err = skcipher_walk_done(&walk, nbytes);
981     - }
982     -
983     - return err;
984     -}
985     -
986     -static int speck128_xts_encrypt(struct skcipher_request *req)
987     -{
988     - return __speck128_xts_crypt(req, crypto_speck128_encrypt,
989     - speck128_xts_encrypt_neon);
990     -}
991     -
992     -static int speck128_xts_decrypt(struct skcipher_request *req)
993     -{
994     - return __speck128_xts_crypt(req, crypto_speck128_decrypt,
995     - speck128_xts_decrypt_neon);
996     -}
997     -
998     -static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
999     - unsigned int keylen)
1000     -{
1001     - struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
1002     - int err;
1003     -
1004     - err = xts_verify_key(tfm, key, keylen);
1005     - if (err)
1006     - return err;
1007     -
1008     - keylen /= 2;
1009     -
1010     - err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
1011     - if (err)
1012     - return err;
1013     -
1014     - return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
1015     -}
1016     -
1017     -/* Speck64 */
1018     -
1019     -struct speck64_xts_tfm_ctx {
1020     - struct speck64_tfm_ctx main_key;
1021     - struct speck64_tfm_ctx tweak_key;
1022     -};
1023     -
1024     -asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
1025     - void *dst, const void *src,
1026     - unsigned int nbytes, void *tweak);
1027     -
1028     -asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
1029     - void *dst, const void *src,
1030     - unsigned int nbytes, void *tweak);
1031     -
1032     -typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
1033     - u8 *, const u8 *);
1034     -typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
1035     - const void *, unsigned int, void *);
1036     -
1037     -static __always_inline int
1038     -__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one,
1039     - speck64_xts_crypt_many_t crypt_many)
1040     -{
1041     - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1042     - const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
1043     - struct skcipher_walk walk;
1044     - __le64 tweak;
1045     - int err;
1046     -
1047     - err = skcipher_walk_virt(&walk, req, true);
1048     -
1049     - crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
1050     -
1051     - while (walk.nbytes > 0) {
1052     - unsigned int nbytes = walk.nbytes;
1053     - u8 *dst = walk.dst.virt.addr;
1054     - const u8 *src = walk.src.virt.addr;
1055     -
1056     - if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
1057     - unsigned int count;
1058     -
1059     - count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
1060     - kernel_neon_begin();
1061     - (*crypt_many)(ctx->main_key.round_keys,
1062     - ctx->main_key.nrounds,
1063     - dst, src, count, &tweak);
1064     - kernel_neon_end();
1065     - dst += count;
1066     - src += count;
1067     - nbytes -= count;
1068     - }
1069     -
1070     - /* Handle any remainder with generic code */
1071     - while (nbytes >= sizeof(tweak)) {
1072     - *(__le64 *)dst = *(__le64 *)src ^ tweak;
1073     - (*crypt_one)(&ctx->main_key, dst, dst);
1074     - *(__le64 *)dst ^= tweak;
1075     - tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
1076     - ((tweak & cpu_to_le64(1ULL << 63)) ?
1077     - 0x1B : 0));
1078     - dst += sizeof(tweak);
1079     - src += sizeof(tweak);
1080     - nbytes -= sizeof(tweak);
1081     - }
1082     - err = skcipher_walk_done(&walk, nbytes);
1083     - }
1084     -
1085     - return err;
1086     -}
1087     -
1088     -static int speck64_xts_encrypt(struct skcipher_request *req)
1089     -{
1090     - return __speck64_xts_crypt(req, crypto_speck64_encrypt,
1091     - speck64_xts_encrypt_neon);
1092     -}
1093     -
1094     -static int speck64_xts_decrypt(struct skcipher_request *req)
1095     -{
1096     - return __speck64_xts_crypt(req, crypto_speck64_decrypt,
1097     - speck64_xts_decrypt_neon);
1098     -}
1099     -
1100     -static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
1101     - unsigned int keylen)
1102     -{
1103     - struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
1104     - int err;
1105     -
1106     - err = xts_verify_key(tfm, key, keylen);
1107     - if (err)
1108     - return err;
1109     -
1110     - keylen /= 2;
1111     -
1112     - err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
1113     - if (err)
1114     - return err;
1115     -
1116     - return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
1117     -}
1118     -
1119     -static struct skcipher_alg speck_algs[] = {
1120     - {
1121     - .base.cra_name = "xts(speck128)",
1122     - .base.cra_driver_name = "xts-speck128-neon",
1123     - .base.cra_priority = 300,
1124     - .base.cra_blocksize = SPECK128_BLOCK_SIZE,
1125     - .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
1126     - .base.cra_alignmask = 7,
1127     - .base.cra_module = THIS_MODULE,
1128     - .min_keysize = 2 * SPECK128_128_KEY_SIZE,
1129     - .max_keysize = 2 * SPECK128_256_KEY_SIZE,
1130     - .ivsize = SPECK128_BLOCK_SIZE,
1131     - .walksize = SPECK_NEON_CHUNK_SIZE,
1132     - .setkey = speck128_xts_setkey,
1133     - .encrypt = speck128_xts_encrypt,
1134     - .decrypt = speck128_xts_decrypt,
1135     - }, {
1136     - .base.cra_name = "xts(speck64)",
1137     - .base.cra_driver_name = "xts-speck64-neon",
1138     - .base.cra_priority = 300,
1139     - .base.cra_blocksize = SPECK64_BLOCK_SIZE,
1140     - .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
1141     - .base.cra_alignmask = 7,
1142     - .base.cra_module = THIS_MODULE,
1143     - .min_keysize = 2 * SPECK64_96_KEY_SIZE,
1144     - .max_keysize = 2 * SPECK64_128_KEY_SIZE,
1145     - .ivsize = SPECK64_BLOCK_SIZE,
1146     - .walksize = SPECK_NEON_CHUNK_SIZE,
1147     - .setkey = speck64_xts_setkey,
1148     - .encrypt = speck64_xts_encrypt,
1149     - .decrypt = speck64_xts_decrypt,
1150     - }
1151     -};
1152     -
1153     -static int __init speck_neon_module_init(void)
1154     -{
1155     - if (!(elf_hwcap & HWCAP_NEON))
1156     - return -ENODEV;
1157     - return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
1158     -}
1159     -
1160     -static void __exit speck_neon_module_exit(void)
1161     -{
1162     - crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
1163     -}
1164     -
1165     -module_init(speck_neon_module_init);
1166     -module_exit(speck_neon_module_exit);
1167     -
1168     -MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
1169     -MODULE_LICENSE("GPL");
1170     -MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
1171     -MODULE_ALIAS_CRYPTO("xts(speck128)");
1172     -MODULE_ALIAS_CRYPTO("xts-speck128-neon");
1173     -MODULE_ALIAS_CRYPTO("xts(speck64)");
1174     -MODULE_ALIAS_CRYPTO("xts-speck64-neon");
1175     diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
1176     index d033da401c26..bc6c141d7372 100644
1177     --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
1178     +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
1179     @@ -335,7 +335,7 @@
1180    
1181     sysmgr: sysmgr@ffd12000 {
1182     compatible = "altr,sys-mgr", "syscon";
1183     - reg = <0xffd12000 0x1000>;
1184     + reg = <0xffd12000 0x228>;
1185     };
1186    
1187     /* Local timer */
1188     diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
1189     index e3fdb0fd6f70..d51944ff9f91 100644
1190     --- a/arch/arm64/crypto/Kconfig
1191     +++ b/arch/arm64/crypto/Kconfig
1192     @@ -119,10 +119,4 @@ config CRYPTO_AES_ARM64_BS
1193     select CRYPTO_AES_ARM64
1194     select CRYPTO_SIMD
1195    
1196     -config CRYPTO_SPECK_NEON
1197     - tristate "NEON accelerated Speck cipher algorithms"
1198     - depends on KERNEL_MODE_NEON
1199     - select CRYPTO_BLKCIPHER
1200     - select CRYPTO_SPECK
1201     -
1202     endif
1203     diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
1204     index bcafd016618e..7bc4bda6d9c6 100644
1205     --- a/arch/arm64/crypto/Makefile
1206     +++ b/arch/arm64/crypto/Makefile
1207     @@ -56,9 +56,6 @@ sha512-arm64-y := sha512-glue.o sha512-core.o
1208     obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o
1209     chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
1210    
1211     -obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
1212     -speck-neon-y := speck-neon-core.o speck-neon-glue.o
1213     -
1214     obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o
1215     aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o
1216    
1217     diff --git a/arch/arm64/crypto/speck-neon-core.S b/arch/arm64/crypto/speck-neon-core.S
1218     deleted file mode 100644
1219     index b14463438b09..000000000000
1220     --- a/arch/arm64/crypto/speck-neon-core.S
1221     +++ /dev/null
1222     @@ -1,352 +0,0 @@
1223     -// SPDX-License-Identifier: GPL-2.0
1224     -/*
1225     - * ARM64 NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
1226     - *
1227     - * Copyright (c) 2018 Google, Inc
1228     - *
1229     - * Author: Eric Biggers <ebiggers@google.com>
1230     - */
1231     -
1232     -#include <linux/linkage.h>
1233     -
1234     - .text
1235     -
1236     - // arguments
1237     - ROUND_KEYS .req x0 // const {u64,u32} *round_keys
1238     - NROUNDS .req w1 // int nrounds
1239     - NROUNDS_X .req x1
1240     - DST .req x2 // void *dst
1241     - SRC .req x3 // const void *src
1242     - NBYTES .req w4 // unsigned int nbytes
1243     - TWEAK .req x5 // void *tweak
1244     -
1245     - // registers which hold the data being encrypted/decrypted
1246     - // (underscores avoid a naming collision with ARM64 registers x0-x3)
1247     - X_0 .req v0
1248     - Y_0 .req v1
1249     - X_1 .req v2
1250     - Y_1 .req v3
1251     - X_2 .req v4
1252     - Y_2 .req v5
1253     - X_3 .req v6
1254     - Y_3 .req v7
1255     -
1256     - // the round key, duplicated in all lanes
1257     - ROUND_KEY .req v8
1258     -
1259     - // index vector for tbl-based 8-bit rotates
1260     - ROTATE_TABLE .req v9
1261     - ROTATE_TABLE_Q .req q9
1262     -
1263     - // temporary registers
1264     - TMP0 .req v10
1265     - TMP1 .req v11
1266     - TMP2 .req v12
1267     - TMP3 .req v13
1268     -
1269     - // multiplication table for updating XTS tweaks
1270     - GFMUL_TABLE .req v14
1271     - GFMUL_TABLE_Q .req q14
1272     -
1273     - // next XTS tweak value(s)
1274     - TWEAKV_NEXT .req v15
1275     -
1276     - // XTS tweaks for the blocks currently being encrypted/decrypted
1277     - TWEAKV0 .req v16
1278     - TWEAKV1 .req v17
1279     - TWEAKV2 .req v18
1280     - TWEAKV3 .req v19
1281     - TWEAKV4 .req v20
1282     - TWEAKV5 .req v21
1283     - TWEAKV6 .req v22
1284     - TWEAKV7 .req v23
1285     -
1286     - .align 4
1287     -.Lror64_8_table:
1288     - .octa 0x080f0e0d0c0b0a090007060504030201
1289     -.Lror32_8_table:
1290     - .octa 0x0c0f0e0d080b0a090407060500030201
1291     -.Lrol64_8_table:
1292     - .octa 0x0e0d0c0b0a09080f0605040302010007
1293     -.Lrol32_8_table:
1294     - .octa 0x0e0d0c0f0a09080b0605040702010003
1295     -.Lgf128mul_table:
1296     - .octa 0x00000000000000870000000000000001
1297     -.Lgf64mul_table:
1298     - .octa 0x0000000000000000000000002d361b00
1299     -
1300     -/*
1301     - * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
1302     - *
1303     - * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
1304     - * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
1305     - * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
1306     - * 'lanes' is the lane specifier: "2d" for Speck128 or "4s" for Speck64.
1307     - */
1308     -.macro _speck_round_128bytes n, lanes
1309     -
1310     - // x = ror(x, 8)
1311     - tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
1312     - tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
1313     - tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
1314     - tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
1315     -
1316     - // x += y
1317     - add X_0.\lanes, X_0.\lanes, Y_0.\lanes
1318     - add X_1.\lanes, X_1.\lanes, Y_1.\lanes
1319     - add X_2.\lanes, X_2.\lanes, Y_2.\lanes
1320     - add X_3.\lanes, X_3.\lanes, Y_3.\lanes
1321     -
1322     - // x ^= k
1323     - eor X_0.16b, X_0.16b, ROUND_KEY.16b
1324     - eor X_1.16b, X_1.16b, ROUND_KEY.16b
1325     - eor X_2.16b, X_2.16b, ROUND_KEY.16b
1326     - eor X_3.16b, X_3.16b, ROUND_KEY.16b
1327     -
1328     - // y = rol(y, 3)
1329     - shl TMP0.\lanes, Y_0.\lanes, #3
1330     - shl TMP1.\lanes, Y_1.\lanes, #3
1331     - shl TMP2.\lanes, Y_2.\lanes, #3
1332     - shl TMP3.\lanes, Y_3.\lanes, #3
1333     - sri TMP0.\lanes, Y_0.\lanes, #(\n - 3)
1334     - sri TMP1.\lanes, Y_1.\lanes, #(\n - 3)
1335     - sri TMP2.\lanes, Y_2.\lanes, #(\n - 3)
1336     - sri TMP3.\lanes, Y_3.\lanes, #(\n - 3)
1337     -
1338     - // y ^= x
1339     - eor Y_0.16b, TMP0.16b, X_0.16b
1340     - eor Y_1.16b, TMP1.16b, X_1.16b
1341     - eor Y_2.16b, TMP2.16b, X_2.16b
1342     - eor Y_3.16b, TMP3.16b, X_3.16b
1343     -.endm
1344     -
1345     -/*
1346     - * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
1347     - *
1348     - * This is the inverse of _speck_round_128bytes().
1349     - */
1350     -.macro _speck_unround_128bytes n, lanes
1351     -
1352     - // y ^= x
1353     - eor TMP0.16b, Y_0.16b, X_0.16b
1354     - eor TMP1.16b, Y_1.16b, X_1.16b
1355     - eor TMP2.16b, Y_2.16b, X_2.16b
1356     - eor TMP3.16b, Y_3.16b, X_3.16b
1357     -
1358     - // y = ror(y, 3)
1359     - ushr Y_0.\lanes, TMP0.\lanes, #3
1360     - ushr Y_1.\lanes, TMP1.\lanes, #3
1361     - ushr Y_2.\lanes, TMP2.\lanes, #3
1362     - ushr Y_3.\lanes, TMP3.\lanes, #3
1363     - sli Y_0.\lanes, TMP0.\lanes, #(\n - 3)
1364     - sli Y_1.\lanes, TMP1.\lanes, #(\n - 3)
1365     - sli Y_2.\lanes, TMP2.\lanes, #(\n - 3)
1366     - sli Y_3.\lanes, TMP3.\lanes, #(\n - 3)
1367     -
1368     - // x ^= k
1369     - eor X_0.16b, X_0.16b, ROUND_KEY.16b
1370     - eor X_1.16b, X_1.16b, ROUND_KEY.16b
1371     - eor X_2.16b, X_2.16b, ROUND_KEY.16b
1372     - eor X_3.16b, X_3.16b, ROUND_KEY.16b
1373     -
1374     - // x -= y
1375     - sub X_0.\lanes, X_0.\lanes, Y_0.\lanes
1376     - sub X_1.\lanes, X_1.\lanes, Y_1.\lanes
1377     - sub X_2.\lanes, X_2.\lanes, Y_2.\lanes
1378     - sub X_3.\lanes, X_3.\lanes, Y_3.\lanes
1379     -
1380     - // x = rol(x, 8)
1381     - tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
1382     - tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
1383     - tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
1384     - tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
1385     -.endm
1386     -
1387     -.macro _next_xts_tweak next, cur, tmp, n
1388     -.if \n == 64
1389     - /*
1390     - * Calculate the next tweak by multiplying the current one by x,
1391     - * modulo p(x) = x^128 + x^7 + x^2 + x + 1.
1392     - */
1393     - sshr \tmp\().2d, \cur\().2d, #63
1394     - and \tmp\().16b, \tmp\().16b, GFMUL_TABLE.16b
1395     - shl \next\().2d, \cur\().2d, #1
1396     - ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
1397     - eor \next\().16b, \next\().16b, \tmp\().16b
1398     -.else
1399     - /*
1400     - * Calculate the next two tweaks by multiplying the current ones by x^2,
1401     - * modulo p(x) = x^64 + x^4 + x^3 + x + 1.
1402     - */
1403     - ushr \tmp\().2d, \cur\().2d, #62
1404     - shl \next\().2d, \cur\().2d, #2
1405     - tbl \tmp\().16b, {GFMUL_TABLE.16b}, \tmp\().16b
1406     - eor \next\().16b, \next\().16b, \tmp\().16b
1407     -.endif
1408     -.endm
1409     -
1410     -/*
1411     - * _speck_xts_crypt() - Speck-XTS encryption/decryption
1412     - *
1413     - * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
1414     - * using Speck-XTS, specifically the variant with a block size of '2n' and round
1415     - * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
1416     - * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
1417     - * nonzero multiple of 128.
1418     - */
1419     -.macro _speck_xts_crypt n, lanes, decrypting
1420     -
1421     - /*
1422     - * If decrypting, modify the ROUND_KEYS parameter to point to the last
1423     - * round key rather than the first, since for decryption the round keys
1424     - * are used in reverse order.
1425     - */
1426     -.if \decrypting
1427     - mov NROUNDS, NROUNDS /* zero the high 32 bits */
1428     -.if \n == 64
1429     - add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #3
1430     - sub ROUND_KEYS, ROUND_KEYS, #8
1431     -.else
1432     - add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #2
1433     - sub ROUND_KEYS, ROUND_KEYS, #4
1434     -.endif
1435     -.endif
1436     -
1437     - // Load the index vector for tbl-based 8-bit rotates
1438     -.if \decrypting
1439     - ldr ROTATE_TABLE_Q, .Lrol\n\()_8_table
1440     -.else
1441     - ldr ROTATE_TABLE_Q, .Lror\n\()_8_table
1442     -.endif
1443     -
1444     - // One-time XTS preparation
1445     -.if \n == 64
1446     - // Load first tweak
1447     - ld1 {TWEAKV0.16b}, [TWEAK]
1448     -
1449     - // Load GF(2^128) multiplication table
1450     - ldr GFMUL_TABLE_Q, .Lgf128mul_table
1451     -.else
1452     - // Load first tweak
1453     - ld1 {TWEAKV0.8b}, [TWEAK]
1454     -
1455     - // Load GF(2^64) multiplication table
1456     - ldr GFMUL_TABLE_Q, .Lgf64mul_table
1457     -
1458     - // Calculate second tweak, packing it together with the first
1459     - ushr TMP0.2d, TWEAKV0.2d, #63
1460     - shl TMP1.2d, TWEAKV0.2d, #1
1461     - tbl TMP0.8b, {GFMUL_TABLE.16b}, TMP0.8b
1462     - eor TMP0.8b, TMP0.8b, TMP1.8b
1463     - mov TWEAKV0.d[1], TMP0.d[0]
1464     -.endif
1465     -
1466     -.Lnext_128bytes_\@:
1467     -
1468     - // Calculate XTS tweaks for next 128 bytes
1469     - _next_xts_tweak TWEAKV1, TWEAKV0, TMP0, \n
1470     - _next_xts_tweak TWEAKV2, TWEAKV1, TMP0, \n
1471     - _next_xts_tweak TWEAKV3, TWEAKV2, TMP0, \n
1472     - _next_xts_tweak TWEAKV4, TWEAKV3, TMP0, \n
1473     - _next_xts_tweak TWEAKV5, TWEAKV4, TMP0, \n
1474     - _next_xts_tweak TWEAKV6, TWEAKV5, TMP0, \n
1475     - _next_xts_tweak TWEAKV7, TWEAKV6, TMP0, \n
1476     - _next_xts_tweak TWEAKV_NEXT, TWEAKV7, TMP0, \n
1477     -
1478     - // Load the next source blocks into {X,Y}[0-3]
1479     - ld1 {X_0.16b-Y_1.16b}, [SRC], #64
1480     - ld1 {X_2.16b-Y_3.16b}, [SRC], #64
1481     -
1482     - // XOR the source blocks with their XTS tweaks
1483     - eor TMP0.16b, X_0.16b, TWEAKV0.16b
1484     - eor Y_0.16b, Y_0.16b, TWEAKV1.16b
1485     - eor TMP1.16b, X_1.16b, TWEAKV2.16b
1486     - eor Y_1.16b, Y_1.16b, TWEAKV3.16b
1487     - eor TMP2.16b, X_2.16b, TWEAKV4.16b
1488     - eor Y_2.16b, Y_2.16b, TWEAKV5.16b
1489     - eor TMP3.16b, X_3.16b, TWEAKV6.16b
1490     - eor Y_3.16b, Y_3.16b, TWEAKV7.16b
1491     -
1492     - /*
1493     - * De-interleave the 'x' and 'y' elements of each block, i.e. make it so
1494     - * that the X[0-3] registers contain only the second halves of blocks,
1495     - * and the Y[0-3] registers contain only the first halves of blocks.
1496     - * (Speck uses the order (y, x) rather than the more intuitive (x, y).)
1497     - */
1498     - uzp2 X_0.\lanes, TMP0.\lanes, Y_0.\lanes
1499     - uzp1 Y_0.\lanes, TMP0.\lanes, Y_0.\lanes
1500     - uzp2 X_1.\lanes, TMP1.\lanes, Y_1.\lanes
1501     - uzp1 Y_1.\lanes, TMP1.\lanes, Y_1.\lanes
1502     - uzp2 X_2.\lanes, TMP2.\lanes, Y_2.\lanes
1503     - uzp1 Y_2.\lanes, TMP2.\lanes, Y_2.\lanes
1504     - uzp2 X_3.\lanes, TMP3.\lanes, Y_3.\lanes
1505     - uzp1 Y_3.\lanes, TMP3.\lanes, Y_3.\lanes
1506     -
1507     - // Do the cipher rounds
1508     - mov x6, ROUND_KEYS
1509     - mov w7, NROUNDS
1510     -.Lnext_round_\@:
1511     -.if \decrypting
1512     - ld1r {ROUND_KEY.\lanes}, [x6]
1513     - sub x6, x6, #( \n / 8 )
1514     - _speck_unround_128bytes \n, \lanes
1515     -.else
1516     - ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 )
1517     - _speck_round_128bytes \n, \lanes
1518     -.endif
1519     - subs w7, w7, #1
1520     - bne .Lnext_round_\@
1521     -
1522     - // Re-interleave the 'x' and 'y' elements of each block
1523     - zip1 TMP0.\lanes, Y_0.\lanes, X_0.\lanes
1524     - zip2 Y_0.\lanes, Y_0.\lanes, X_0.\lanes
1525     - zip1 TMP1.\lanes, Y_1.\lanes, X_1.\lanes
1526     - zip2 Y_1.\lanes, Y_1.\lanes, X_1.\lanes
1527     - zip1 TMP2.\lanes, Y_2.\lanes, X_2.\lanes
1528     - zip2 Y_2.\lanes, Y_2.\lanes, X_2.\lanes
1529     - zip1 TMP3.\lanes, Y_3.\lanes, X_3.\lanes
1530     - zip2 Y_3.\lanes, Y_3.\lanes, X_3.\lanes
1531     -
1532     - // XOR the encrypted/decrypted blocks with the tweaks calculated earlier
1533     - eor X_0.16b, TMP0.16b, TWEAKV0.16b
1534     - eor Y_0.16b, Y_0.16b, TWEAKV1.16b
1535     - eor X_1.16b, TMP1.16b, TWEAKV2.16b
1536     - eor Y_1.16b, Y_1.16b, TWEAKV3.16b
1537     - eor X_2.16b, TMP2.16b, TWEAKV4.16b
1538     - eor Y_2.16b, Y_2.16b, TWEAKV5.16b
1539     - eor X_3.16b, TMP3.16b, TWEAKV6.16b
1540     - eor Y_3.16b, Y_3.16b, TWEAKV7.16b
1541     - mov TWEAKV0.16b, TWEAKV_NEXT.16b
1542     -
1543     - // Store the ciphertext in the destination buffer
1544     - st1 {X_0.16b-Y_1.16b}, [DST], #64
1545     - st1 {X_2.16b-Y_3.16b}, [DST], #64
1546     -
1547     - // Continue if there are more 128-byte chunks remaining
1548     - subs NBYTES, NBYTES, #128
1549     - bne .Lnext_128bytes_\@
1550     -
1551     - // Store the next tweak and return
1552     -.if \n == 64
1553     - st1 {TWEAKV_NEXT.16b}, [TWEAK]
1554     -.else
1555     - st1 {TWEAKV_NEXT.8b}, [TWEAK]
1556     -.endif
1557     - ret
1558     -.endm
1559     -
1560     -ENTRY(speck128_xts_encrypt_neon)
1561     - _speck_xts_crypt n=64, lanes=2d, decrypting=0
1562     -ENDPROC(speck128_xts_encrypt_neon)
1563     -
1564     -ENTRY(speck128_xts_decrypt_neon)
1565     - _speck_xts_crypt n=64, lanes=2d, decrypting=1
1566     -ENDPROC(speck128_xts_decrypt_neon)
1567     -
1568     -ENTRY(speck64_xts_encrypt_neon)
1569     - _speck_xts_crypt n=32, lanes=4s, decrypting=0
1570     -ENDPROC(speck64_xts_encrypt_neon)
1571     -
1572     -ENTRY(speck64_xts_decrypt_neon)
1573     - _speck_xts_crypt n=32, lanes=4s, decrypting=1
1574     -ENDPROC(speck64_xts_decrypt_neon)
1575     diff --git a/arch/arm64/crypto/speck-neon-glue.c b/arch/arm64/crypto/speck-neon-glue.c
1576     deleted file mode 100644
1577     index 6e233aeb4ff4..000000000000
1578     --- a/arch/arm64/crypto/speck-neon-glue.c
1579     +++ /dev/null
1580     @@ -1,282 +0,0 @@
1581     -// SPDX-License-Identifier: GPL-2.0
1582     -/*
1583     - * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
1584     - * (64-bit version; based on the 32-bit version)
1585     - *
1586     - * Copyright (c) 2018 Google, Inc
1587     - */
1588     -
1589     -#include <asm/hwcap.h>
1590     -#include <asm/neon.h>
1591     -#include <asm/simd.h>
1592     -#include <crypto/algapi.h>
1593     -#include <crypto/gf128mul.h>
1594     -#include <crypto/internal/skcipher.h>
1595     -#include <crypto/speck.h>
1596     -#include <crypto/xts.h>
1597     -#include <linux/kernel.h>
1598     -#include <linux/module.h>
1599     -
1600     -/* The assembly functions only handle multiples of 128 bytes */
1601     -#define SPECK_NEON_CHUNK_SIZE 128
1602     -
1603     -/* Speck128 */
1604     -
1605     -struct speck128_xts_tfm_ctx {
1606     - struct speck128_tfm_ctx main_key;
1607     - struct speck128_tfm_ctx tweak_key;
1608     -};
1609     -
1610     -asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
1611     - void *dst, const void *src,
1612     - unsigned int nbytes, void *tweak);
1613     -
1614     -asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
1615     - void *dst, const void *src,
1616     - unsigned int nbytes, void *tweak);
1617     -
1618     -typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
1619     - u8 *, const u8 *);
1620     -typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
1621     - const void *, unsigned int, void *);
1622     -
1623     -static __always_inline int
1624     -__speck128_xts_crypt(struct skcipher_request *req,
1625     - speck128_crypt_one_t crypt_one,
1626     - speck128_xts_crypt_many_t crypt_many)
1627     -{
1628     - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1629     - const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
1630     - struct skcipher_walk walk;
1631     - le128 tweak;
1632     - int err;
1633     -
1634     - err = skcipher_walk_virt(&walk, req, true);
1635     -
1636     - crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
1637     -
1638     - while (walk.nbytes > 0) {
1639     - unsigned int nbytes = walk.nbytes;
1640     - u8 *dst = walk.dst.virt.addr;
1641     - const u8 *src = walk.src.virt.addr;
1642     -
1643     - if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
1644     - unsigned int count;
1645     -
1646     - count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
1647     - kernel_neon_begin();
1648     - (*crypt_many)(ctx->main_key.round_keys,
1649     - ctx->main_key.nrounds,
1650     - dst, src, count, &tweak);
1651     - kernel_neon_end();
1652     - dst += count;
1653     - src += count;
1654     - nbytes -= count;
1655     - }
1656     -
1657     - /* Handle any remainder with generic code */
1658     - while (nbytes >= sizeof(tweak)) {
1659     - le128_xor((le128 *)dst, (const le128 *)src, &tweak);
1660     - (*crypt_one)(&ctx->main_key, dst, dst);
1661     - le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
1662     - gf128mul_x_ble(&tweak, &tweak);
1663     -
1664     - dst += sizeof(tweak);
1665     - src += sizeof(tweak);
1666     - nbytes -= sizeof(tweak);
1667     - }
1668     - err = skcipher_walk_done(&walk, nbytes);
1669     - }
1670     -
1671     - return err;
1672     -}
1673     -
1674     -static int speck128_xts_encrypt(struct skcipher_request *req)
1675     -{
1676     - return __speck128_xts_crypt(req, crypto_speck128_encrypt,
1677     - speck128_xts_encrypt_neon);
1678     -}
1679     -
1680     -static int speck128_xts_decrypt(struct skcipher_request *req)
1681     -{
1682     - return __speck128_xts_crypt(req, crypto_speck128_decrypt,
1683     - speck128_xts_decrypt_neon);
1684     -}
1685     -
1686     -static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
1687     - unsigned int keylen)
1688     -{
1689     - struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
1690     - int err;
1691     -
1692     - err = xts_verify_key(tfm, key, keylen);
1693     - if (err)
1694     - return err;
1695     -
1696     - keylen /= 2;
1697     -
1698     - err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
1699     - if (err)
1700     - return err;
1701     -
1702     - return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
1703     -}
1704     -
1705     -/* Speck64 */
1706     -
1707     -struct speck64_xts_tfm_ctx {
1708     - struct speck64_tfm_ctx main_key;
1709     - struct speck64_tfm_ctx tweak_key;
1710     -};
1711     -
1712     -asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
1713     - void *dst, const void *src,
1714     - unsigned int nbytes, void *tweak);
1715     -
1716     -asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
1717     - void *dst, const void *src,
1718     - unsigned int nbytes, void *tweak);
1719     -
1720     -typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
1721     - u8 *, const u8 *);
1722     -typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
1723     - const void *, unsigned int, void *);
1724     -
1725     -static __always_inline int
1726     -__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one,
1727     - speck64_xts_crypt_many_t crypt_many)
1728     -{
1729     - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1730     - const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
1731     - struct skcipher_walk walk;
1732     - __le64 tweak;
1733     - int err;
1734     -
1735     - err = skcipher_walk_virt(&walk, req, true);
1736     -
1737     - crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
1738     -
1739     - while (walk.nbytes > 0) {
1740     - unsigned int nbytes = walk.nbytes;
1741     - u8 *dst = walk.dst.virt.addr;
1742     - const u8 *src = walk.src.virt.addr;
1743     -
1744     - if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
1745     - unsigned int count;
1746     -
1747     - count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
1748     - kernel_neon_begin();
1749     - (*crypt_many)(ctx->main_key.round_keys,
1750     - ctx->main_key.nrounds,
1751     - dst, src, count, &tweak);
1752     - kernel_neon_end();
1753     - dst += count;
1754     - src += count;
1755     - nbytes -= count;
1756     - }
1757     -
1758     - /* Handle any remainder with generic code */
1759     - while (nbytes >= sizeof(tweak)) {
1760     - *(__le64 *)dst = *(__le64 *)src ^ tweak;
1761     - (*crypt_one)(&ctx->main_key, dst, dst);
1762     - *(__le64 *)dst ^= tweak;
1763     - tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
1764     - ((tweak & cpu_to_le64(1ULL << 63)) ?
1765     - 0x1B : 0));
1766     - dst += sizeof(tweak);
1767     - src += sizeof(tweak);
1768     - nbytes -= sizeof(tweak);
1769     - }
1770     - err = skcipher_walk_done(&walk, nbytes);
1771     - }
1772     -
1773     - return err;
1774     -}
1775     -
1776     -static int speck64_xts_encrypt(struct skcipher_request *req)
1777     -{
1778     - return __speck64_xts_crypt(req, crypto_speck64_encrypt,
1779     - speck64_xts_encrypt_neon);
1780     -}
1781     -
1782     -static int speck64_xts_decrypt(struct skcipher_request *req)
1783     -{
1784     - return __speck64_xts_crypt(req, crypto_speck64_decrypt,
1785     - speck64_xts_decrypt_neon);
1786     -}
1787     -
1788     -static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
1789     - unsigned int keylen)
1790     -{
1791     - struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
1792     - int err;
1793     -
1794     - err = xts_verify_key(tfm, key, keylen);
1795     - if (err)
1796     - return err;
1797     -
1798     - keylen /= 2;
1799     -
1800     - err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
1801     - if (err)
1802     - return err;
1803     -
1804     - return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
1805     -}
1806     -
1807     -static struct skcipher_alg speck_algs[] = {
1808     - {
1809     - .base.cra_name = "xts(speck128)",
1810     - .base.cra_driver_name = "xts-speck128-neon",
1811     - .base.cra_priority = 300,
1812     - .base.cra_blocksize = SPECK128_BLOCK_SIZE,
1813     - .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
1814     - .base.cra_alignmask = 7,
1815     - .base.cra_module = THIS_MODULE,
1816     - .min_keysize = 2 * SPECK128_128_KEY_SIZE,
1817     - .max_keysize = 2 * SPECK128_256_KEY_SIZE,
1818     - .ivsize = SPECK128_BLOCK_SIZE,
1819     - .walksize = SPECK_NEON_CHUNK_SIZE,
1820     - .setkey = speck128_xts_setkey,
1821     - .encrypt = speck128_xts_encrypt,
1822     - .decrypt = speck128_xts_decrypt,
1823     - }, {
1824     - .base.cra_name = "xts(speck64)",
1825     - .base.cra_driver_name = "xts-speck64-neon",
1826     - .base.cra_priority = 300,
1827     - .base.cra_blocksize = SPECK64_BLOCK_SIZE,
1828     - .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
1829     - .base.cra_alignmask = 7,
1830     - .base.cra_module = THIS_MODULE,
1831     - .min_keysize = 2 * SPECK64_96_KEY_SIZE,
1832     - .max_keysize = 2 * SPECK64_128_KEY_SIZE,
1833     - .ivsize = SPECK64_BLOCK_SIZE,
1834     - .walksize = SPECK_NEON_CHUNK_SIZE,
1835     - .setkey = speck64_xts_setkey,
1836     - .encrypt = speck64_xts_encrypt,
1837     - .decrypt = speck64_xts_decrypt,
1838     - }
1839     -};
1840     -
1841     -static int __init speck_neon_module_init(void)
1842     -{
1843     - if (!(elf_hwcap & HWCAP_ASIMD))
1844     - return -ENODEV;
1845     - return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
1846     -}
1847     -
1848     -static void __exit speck_neon_module_exit(void)
1849     -{
1850     - crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
1851     -}
1852     -
1853     -module_init(speck_neon_module_init);
1854     -module_exit(speck_neon_module_exit);
1855     -
1856     -MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
1857     -MODULE_LICENSE("GPL");
1858     -MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
1859     -MODULE_ALIAS_CRYPTO("xts(speck128)");
1860     -MODULE_ALIAS_CRYPTO("xts-speck128-neon");
1861     -MODULE_ALIAS_CRYPTO("xts(speck64)");
1862     -MODULE_ALIAS_CRYPTO("xts-speck64-neon");
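For reference, the per-block tweak update in the removed Speck64 glue code above is a multiply-by-x in GF(2^64): shift the tweak left by one and, if a bit fell off the top, fold in the 0x1B reduction constant. A minimal standalone C sketch of that arithmetic (the cpu_to_le64()/le64_to_cpu() endianness wrapping used above is deliberately left out):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the "(tweak << 1) ^ (msb ? 0x1B : 0)" update above. */
    static uint64_t gf64_mul_x(uint64_t tweak)
    {
        uint64_t carry = tweak >> 63;              /* bit that falls off the top */
        return (tweak << 1) ^ (carry ? 0x1B : 0);  /* fold in the reduction constant */
    }

    int main(void)
    {
        uint64_t t = 0x8000000000000001ULL;
        printf("next tweak: 0x%016llx\n", (unsigned long long)gf64_mul_x(t));
        return 0;
    }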
1863     diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
1864     index e238b7932096..93f69d82225d 100644
1865     --- a/arch/arm64/kernel/cpufeature.c
1866     +++ b/arch/arm64/kernel/cpufeature.c
1867     @@ -848,15 +848,29 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus
1868     }
1869    
1870     static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
1871     - int __unused)
1872     + int scope)
1873     {
1874     - return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_IDC_SHIFT);
1875     + u64 ctr;
1876     +
1877     + if (scope == SCOPE_SYSTEM)
1878     + ctr = arm64_ftr_reg_ctrel0.sys_val;
1879     + else
1880     + ctr = read_cpuid_cachetype();
1881     +
1882     + return ctr & BIT(CTR_IDC_SHIFT);
1883     }
1884    
1885     static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
1886     - int __unused)
1887     + int scope)
1888     {
1889     - return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_DIC_SHIFT);
1890     + u64 ctr;
1891     +
1892     + if (scope == SCOPE_SYSTEM)
1893     + ctr = arm64_ftr_reg_ctrel0.sys_val;
1894     + else
1895     + ctr = read_cpuid_cachetype();
1896     +
1897     + return ctr & BIT(CTR_DIC_SHIFT);
1898     }
1899    
1900     #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
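The reworked has_cache_idc()/has_cache_dic() checks above pick where CTR_EL0 comes from based on the capability scope: the sanitised system-wide value for SCOPE_SYSTEM, or the raw register of the CPU running the check otherwise, presumably so that late or mismatched CPUs are judged on their own cache type register. A rough standalone sketch of that selection; the accessors are stubs and the bit position is assumed rather than taken from the patch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum scope { SCOPE_SYSTEM, SCOPE_LOCAL_CPU };

    #define CTR_IDC_BIT (1ULL << 28)   /* assumed bit position, illustration only */

    /* Stand-ins for the kernel accessors: the sanitised system-wide value
     * of CTR_EL0 vs. the raw register of the CPU running the check. */
    static uint64_t sanitised_ctr_el0(void) { return CTR_IDC_BIT; }
    static uint64_t this_cpu_ctr_el0(void)  { return 0; }

    static bool has_cache_idc(enum scope scope)
    {
        uint64_t ctr = (scope == SCOPE_SYSTEM) ? sanitised_ctr_el0()
                                               : this_cpu_ctr_el0();
        return ctr & CTR_IDC_BIT;
    }

    int main(void)
    {
        printf("system-wide: %d\n", has_cache_idc(SCOPE_SYSTEM));    /* 1 */
        printf("this cpu:    %d\n", has_cache_idc(SCOPE_LOCAL_CPU)); /* 0 */
        return 0;
    }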
1901     diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
1902     index 09dbea221a27..8556876c9109 100644
1903     --- a/arch/arm64/kernel/entry.S
1904     +++ b/arch/arm64/kernel/entry.S
1905     @@ -589,7 +589,7 @@ el1_undef:
1906     inherit_daif pstate=x23, tmp=x2
1907     mov x0, sp
1908     bl do_undefinstr
1909     - ASM_BUG()
1910     + kernel_exit 1
1911     el1_dbg:
1912     /*
1913     * Debug exception handling
1914     diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
1915     index 039e9ff379cc..b9da093e0341 100644
1916     --- a/arch/arm64/kernel/traps.c
1917     +++ b/arch/arm64/kernel/traps.c
1918     @@ -310,10 +310,12 @@ static int call_undef_hook(struct pt_regs *regs)
1919     int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
1920     void __user *pc = (void __user *)instruction_pointer(regs);
1921    
1922     - if (!user_mode(regs))
1923     - return 1;
1924     -
1925     - if (compat_thumb_mode(regs)) {
1926     + if (!user_mode(regs)) {
1927     + __le32 instr_le;
1928     + if (probe_kernel_address((__force __le32 *)pc, instr_le))
1929     + goto exit;
1930     + instr = le32_to_cpu(instr_le);
1931     + } else if (compat_thumb_mode(regs)) {
1932     /* 16-bit Thumb instruction */
1933     __le16 instr_le;
1934     if (get_user(instr_le, (__le16 __user *)pc))
1935     @@ -407,6 +409,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
1936     return;
1937    
1938     force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
1939     + BUG_ON(!user_mode(regs));
1940     }
1941    
1942     void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
1943     diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
1944     index 68755fd70dcf..5df2d611b77d 100644
1945     --- a/arch/arm64/lib/Makefile
1946     +++ b/arch/arm64/lib/Makefile
1947     @@ -12,7 +12,7 @@ lib-y := clear_user.o delay.o copy_from_user.o \
1948     # when supported by the CPU. Result and argument registers are handled
1949     # correctly, based on the function prototype.
1950     lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
1951     -CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \
1952     +CFLAGS_atomic_ll_sc.o := -ffixed-x1 -ffixed-x2 \
1953     -ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6 \
1954     -ffixed-x7 -fcall-saved-x8 -fcall-saved-x9 \
1955     -fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \
1956     diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
1957     index 1d5483f6e457..93a3c3c0238c 100644
1958     --- a/arch/m68k/configs/amiga_defconfig
1959     +++ b/arch/m68k/configs/amiga_defconfig
1960     @@ -657,7 +657,6 @@ CONFIG_CRYPTO_SALSA20=m
1961     CONFIG_CRYPTO_SEED=m
1962     CONFIG_CRYPTO_SERPENT=m
1963     CONFIG_CRYPTO_SM4=m
1964     -CONFIG_CRYPTO_SPECK=m
1965     CONFIG_CRYPTO_TEA=m
1966     CONFIG_CRYPTO_TWOFISH=m
1967     CONFIG_CRYPTO_LZO=m
1968     diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
1969     index 52a0af127951..e3d0efd6397d 100644
1970     --- a/arch/m68k/configs/apollo_defconfig
1971     +++ b/arch/m68k/configs/apollo_defconfig
1972     @@ -614,7 +614,6 @@ CONFIG_CRYPTO_SALSA20=m
1973     CONFIG_CRYPTO_SEED=m
1974     CONFIG_CRYPTO_SERPENT=m
1975     CONFIG_CRYPTO_SM4=m
1976     -CONFIG_CRYPTO_SPECK=m
1977     CONFIG_CRYPTO_TEA=m
1978     CONFIG_CRYPTO_TWOFISH=m
1979     CONFIG_CRYPTO_LZO=m
1980     diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
1981     index b3103e51268a..75ac0c76e884 100644
1982     --- a/arch/m68k/configs/atari_defconfig
1983     +++ b/arch/m68k/configs/atari_defconfig
1984     @@ -635,7 +635,6 @@ CONFIG_CRYPTO_SALSA20=m
1985     CONFIG_CRYPTO_SEED=m
1986     CONFIG_CRYPTO_SERPENT=m
1987     CONFIG_CRYPTO_SM4=m
1988     -CONFIG_CRYPTO_SPECK=m
1989     CONFIG_CRYPTO_TEA=m
1990     CONFIG_CRYPTO_TWOFISH=m
1991     CONFIG_CRYPTO_LZO=m
1992     diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
1993     index fb7d651a4cab..c6e492700188 100644
1994     --- a/arch/m68k/configs/bvme6000_defconfig
1995     +++ b/arch/m68k/configs/bvme6000_defconfig
1996     @@ -606,7 +606,6 @@ CONFIG_CRYPTO_SALSA20=m
1997     CONFIG_CRYPTO_SEED=m
1998     CONFIG_CRYPTO_SERPENT=m
1999     CONFIG_CRYPTO_SM4=m
2000     -CONFIG_CRYPTO_SPECK=m
2001     CONFIG_CRYPTO_TEA=m
2002     CONFIG_CRYPTO_TWOFISH=m
2003     CONFIG_CRYPTO_LZO=m
2004     diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
2005     index 6b37f5537c39..b00d1c477432 100644
2006     --- a/arch/m68k/configs/hp300_defconfig
2007     +++ b/arch/m68k/configs/hp300_defconfig
2008     @@ -616,7 +616,6 @@ CONFIG_CRYPTO_SALSA20=m
2009     CONFIG_CRYPTO_SEED=m
2010     CONFIG_CRYPTO_SERPENT=m
2011     CONFIG_CRYPTO_SM4=m
2012     -CONFIG_CRYPTO_SPECK=m
2013     CONFIG_CRYPTO_TEA=m
2014     CONFIG_CRYPTO_TWOFISH=m
2015     CONFIG_CRYPTO_LZO=m
2016     diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
2017     index c717bf879449..85cac3770d89 100644
2018     --- a/arch/m68k/configs/mac_defconfig
2019     +++ b/arch/m68k/configs/mac_defconfig
2020     @@ -638,7 +638,6 @@ CONFIG_CRYPTO_SALSA20=m
2021     CONFIG_CRYPTO_SEED=m
2022     CONFIG_CRYPTO_SERPENT=m
2023     CONFIG_CRYPTO_SM4=m
2024     -CONFIG_CRYPTO_SPECK=m
2025     CONFIG_CRYPTO_TEA=m
2026     CONFIG_CRYPTO_TWOFISH=m
2027     CONFIG_CRYPTO_LZO=m
2028     diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
2029     index 226c994ce794..b3a5d1e99d27 100644
2030     --- a/arch/m68k/configs/multi_defconfig
2031     +++ b/arch/m68k/configs/multi_defconfig
2032     @@ -720,7 +720,6 @@ CONFIG_CRYPTO_SALSA20=m
2033     CONFIG_CRYPTO_SEED=m
2034     CONFIG_CRYPTO_SERPENT=m
2035     CONFIG_CRYPTO_SM4=m
2036     -CONFIG_CRYPTO_SPECK=m
2037     CONFIG_CRYPTO_TEA=m
2038     CONFIG_CRYPTO_TWOFISH=m
2039     CONFIG_CRYPTO_LZO=m
2040     diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
2041     index b383327fd77a..0ca22608453f 100644
2042     --- a/arch/m68k/configs/mvme147_defconfig
2043     +++ b/arch/m68k/configs/mvme147_defconfig
2044     @@ -606,7 +606,6 @@ CONFIG_CRYPTO_SALSA20=m
2045     CONFIG_CRYPTO_SEED=m
2046     CONFIG_CRYPTO_SERPENT=m
2047     CONFIG_CRYPTO_SM4=m
2048     -CONFIG_CRYPTO_SPECK=m
2049     CONFIG_CRYPTO_TEA=m
2050     CONFIG_CRYPTO_TWOFISH=m
2051     CONFIG_CRYPTO_LZO=m
2052     diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
2053     index 9783d3deb9e9..8e3d10d12d9c 100644
2054     --- a/arch/m68k/configs/mvme16x_defconfig
2055     +++ b/arch/m68k/configs/mvme16x_defconfig
2056     @@ -606,7 +606,6 @@ CONFIG_CRYPTO_SALSA20=m
2057     CONFIG_CRYPTO_SEED=m
2058     CONFIG_CRYPTO_SERPENT=m
2059     CONFIG_CRYPTO_SM4=m
2060     -CONFIG_CRYPTO_SPECK=m
2061     CONFIG_CRYPTO_TEA=m
2062     CONFIG_CRYPTO_TWOFISH=m
2063     CONFIG_CRYPTO_LZO=m
2064     diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
2065     index a35d10ee10cb..ff7e653ec7fa 100644
2066     --- a/arch/m68k/configs/q40_defconfig
2067     +++ b/arch/m68k/configs/q40_defconfig
2068     @@ -629,7 +629,6 @@ CONFIG_CRYPTO_SALSA20=m
2069     CONFIG_CRYPTO_SEED=m
2070     CONFIG_CRYPTO_SERPENT=m
2071     CONFIG_CRYPTO_SM4=m
2072     -CONFIG_CRYPTO_SPECK=m
2073     CONFIG_CRYPTO_TEA=m
2074     CONFIG_CRYPTO_TWOFISH=m
2075     CONFIG_CRYPTO_LZO=m
2076     diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
2077     index 573bf922d448..612cf46f6d0c 100644
2078     --- a/arch/m68k/configs/sun3_defconfig
2079     +++ b/arch/m68k/configs/sun3_defconfig
2080     @@ -607,7 +607,6 @@ CONFIG_CRYPTO_SALSA20=m
2081     CONFIG_CRYPTO_SEED=m
2082     CONFIG_CRYPTO_SERPENT=m
2083     CONFIG_CRYPTO_SM4=m
2084     -CONFIG_CRYPTO_SPECK=m
2085     CONFIG_CRYPTO_TEA=m
2086     CONFIG_CRYPTO_TWOFISH=m
2087     CONFIG_CRYPTO_LZO=m
2088     diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
2089     index efb27a7fcc55..a6a7bb6dc3fd 100644
2090     --- a/arch/m68k/configs/sun3x_defconfig
2091     +++ b/arch/m68k/configs/sun3x_defconfig
2092     @@ -608,7 +608,6 @@ CONFIG_CRYPTO_SALSA20=m
2093     CONFIG_CRYPTO_SEED=m
2094     CONFIG_CRYPTO_SERPENT=m
2095     CONFIG_CRYPTO_SM4=m
2096     -CONFIG_CRYPTO_SPECK=m
2097     CONFIG_CRYPTO_TEA=m
2098     CONFIG_CRYPTO_TWOFISH=m
2099     CONFIG_CRYPTO_LZO=m
2100     diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
2101     index 75108ec669eb..6c79e8a16a26 100644
2102     --- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
2103     +++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
2104     @@ -67,7 +67,7 @@ void (*cvmx_override_pko_queue_priority) (int pko_port,
2105     void (*cvmx_override_ipd_port_setup) (int ipd_port);
2106    
2107     /* Port count per interface */
2108     -static int interface_port_count[5];
2109     +static int interface_port_count[9];
2110    
2111     /**
2112     * Return the number of interfaces the chip has. Each interface
2113     diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
2114     index 49d6046ca1d0..c373eb605040 100644
2115     --- a/arch/mips/include/asm/processor.h
2116     +++ b/arch/mips/include/asm/processor.h
2117     @@ -81,7 +81,7 @@ extern unsigned int vced_count, vcei_count;
2118    
2119     #endif
2120    
2121     -#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_256M)
2122     +#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M)
2123    
2124     extern unsigned long mips_stack_top(void);
2125     #define STACK_TOP mips_stack_top()
2126     diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
2127     index 242c5ab65611..d2f92273fe37 100644
2128     --- a/arch/parisc/kernel/entry.S
2129     +++ b/arch/parisc/kernel/entry.S
2130     @@ -186,7 +186,7 @@
2131     bv,n 0(%r3)
2132     nop
2133     .word 0 /* checksum (will be patched) */
2134     - .word PA(os_hpmc) /* address of handler */
2135     + .word 0 /* address of handler */
2136     .word 0 /* length of handler */
2137     .endm
2138    
2139     diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
2140     index 781c3b9a3e46..fde654115564 100644
2141     --- a/arch/parisc/kernel/hpmc.S
2142     +++ b/arch/parisc/kernel/hpmc.S
2143     @@ -85,7 +85,7 @@ END(hpmc_pim_data)
2144    
2145     .import intr_save, code
2146     .align 16
2147     -ENTRY_CFI(os_hpmc)
2148     +ENTRY(os_hpmc)
2149     .os_hpmc:
2150    
2151     /*
2152     @@ -302,7 +302,6 @@ os_hpmc_6:
2153     b .
2154     nop
2155     .align 16 /* make function length multiple of 16 bytes */
2156     -ENDPROC_CFI(os_hpmc)
2157     .os_hpmc_end:
2158    
2159    
2160     diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2161     index 68f10f87073d..abeb5321a83f 100644
2162     --- a/arch/parisc/kernel/traps.c
2163     +++ b/arch/parisc/kernel/traps.c
2164     @@ -802,7 +802,8 @@ void __init initialize_ivt(const void *iva)
2165     * the Length/4 words starting at Address is zero.
2166     */
2167    
2168     - /* Compute Checksum for HPMC handler */
2169     + /* Setup IVA and compute checksum for HPMC handler */
2170     + ivap[6] = (u32)__pa(os_hpmc);
2171     length = os_hpmc_size;
2172     ivap[7] = length;
2173    
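The hunk above stores the HPMC handler address into the IVT entry (ivap[6]) right where the checksum is computed, because of the rule quoted in the surrounding context that the covered 32-bit words must sum to zero. A standalone sketch of picking a checksum word to satisfy that rule; the array contents and checksum slot here are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Choose entry[checksum_idx] so the 32-bit words sum to zero mod 2^32. */
    static void fixup_checksum(uint32_t *entry, unsigned int nwords,
                               unsigned int checksum_idx)
    {
        uint32_t sum = 0;
        unsigned int i;

        entry[checksum_idx] = 0;
        for (i = 0; i < nwords; i++)
            sum += entry[i];
        entry[checksum_idx] = (uint32_t)(0U - sum);
    }

    int main(void)
    {
        uint32_t ivt[8] = { 0x12345678, 0, 0, 0, 0, 0xdeadbeef, 32, 0 };
        uint32_t check = 0;
        unsigned int i;

        fixup_checksum(ivt, 8, 3);               /* hypothetical checksum slot */
        for (i = 0; i < 8; i++)
            check += ivt[i];
        printf("sum of words: 0x%08x\n", check); /* prints 0x00000000 */
        return 0;
    }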
2174     diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
2175     index 74842d28a7a1..aae9b0d71c1e 100644
2176     --- a/arch/parisc/mm/init.c
2177     +++ b/arch/parisc/mm/init.c
2178     @@ -494,12 +494,8 @@ static void __init map_pages(unsigned long start_vaddr,
2179     pte = pte_mkhuge(pte);
2180     }
2181    
2182     - if (address >= end_paddr) {
2183     - if (force)
2184     - break;
2185     - else
2186     - pte_val(pte) = 0;
2187     - }
2188     + if (address >= end_paddr)
2189     + break;
2190    
2191     set_pte(pg_table, pte);
2192    
2193     diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
2194     index fad8ddd697ac..0abf2e7fd222 100644
2195     --- a/arch/powerpc/include/asm/mpic.h
2196     +++ b/arch/powerpc/include/asm/mpic.h
2197     @@ -393,7 +393,14 @@ extern struct bus_type mpic_subsys;
2198     #define MPIC_REGSET_TSI108 MPIC_REGSET(1) /* Tsi108/109 PIC */
2199    
2200     /* Get the version of primary MPIC */
2201     +#ifdef CONFIG_MPIC
2202     extern u32 fsl_mpic_primary_get_version(void);
2203     +#else
2204     +static inline u32 fsl_mpic_primary_get_version(void)
2205     +{
2206     + return 0;
2207     +}
2208     +#endif
2209    
2210     /* Allocate the controller structure and setup the linux irq descs
2211     * for the range if interrupts passed in. No HW initialization is
2212     diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
2213     index 3497c8329c1d..3022d67f0c48 100644
2214     --- a/arch/powerpc/kernel/mce_power.c
2215     +++ b/arch/powerpc/kernel/mce_power.c
2216     @@ -89,6 +89,13 @@ static void flush_and_reload_slb(void)
2217    
2218     static void flush_erat(void)
2219     {
2220     +#ifdef CONFIG_PPC_BOOK3S_64
2221     + if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) {
2222     + flush_and_reload_slb();
2223     + return;
2224     + }
2225     +#endif
2226     + /* PPC_INVALIDATE_ERAT can only be used on ISA v3 and newer */
2227     asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
2228     }
2229    
2230     diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
2231     index 77371c9ef3d8..2d861a36662e 100644
2232     --- a/arch/powerpc/kernel/module.c
2233     +++ b/arch/powerpc/kernel/module.c
2234     @@ -74,6 +74,14 @@ int module_finalize(const Elf_Ehdr *hdr,
2235     (void *)sect->sh_addr + sect->sh_size);
2236     #endif /* CONFIG_PPC64 */
2237    
2238     +#ifdef PPC64_ELF_ABI_v1
2239     + sect = find_section(hdr, sechdrs, ".opd");
2240     + if (sect != NULL) {
2241     + me->arch.start_opd = sect->sh_addr;
2242     + me->arch.end_opd = sect->sh_addr + sect->sh_size;
2243     + }
2244     +#endif /* PPC64_ELF_ABI_v1 */
2245     +
2246     #ifdef CONFIG_PPC_BARRIER_NOSPEC
2247     sect = find_section(hdr, sechdrs, "__spec_barrier_fixup");
2248     if (sect != NULL)
2249     diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
2250     index b8d61e019d06..2c53de9f3b6a 100644
2251     --- a/arch/powerpc/kernel/module_64.c
2252     +++ b/arch/powerpc/kernel/module_64.c
2253     @@ -360,11 +360,6 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
2254     else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0)
2255     dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
2256     sechdrs[i].sh_size);
2257     - else if (!strcmp(secstrings + sechdrs[i].sh_name, ".opd")) {
2258     - me->arch.start_opd = sechdrs[i].sh_addr;
2259     - me->arch.end_opd = sechdrs[i].sh_addr +
2260     - sechdrs[i].sh_size;
2261     - }
2262    
2263     /* We don't handle .init for the moment: rename to _init */
2264     while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init")))
2265     diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
2266     index 6a501b25dd85..faf00222b324 100644
2267     --- a/arch/powerpc/kernel/setup_64.c
2268     +++ b/arch/powerpc/kernel/setup_64.c
2269     @@ -243,13 +243,19 @@ static void cpu_ready_for_interrupts(void)
2270     }
2271    
2272     /*
2273     - * Fixup HFSCR:TM based on CPU features. The bit is set by our
2274     - * early asm init because at that point we haven't updated our
2275     - * CPU features from firmware and device-tree. Here we have,
2276     - * so let's do it.
2277     + * Set HFSCR:TM based on CPU features:
2278     + * In the special case of TM no suspend (P9N DD2.1), Linux is
2279     + * told TM is off via the dt-ftrs but told to (partially) use
2280     + * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM]
2281     + * will be off from dt-ftrs but we need to turn it on for the
2282     + * no suspend case.
2283     */
2284     - if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
2285     - mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
2286     + if (cpu_has_feature(CPU_FTR_HVMODE)) {
2287     + if (cpu_has_feature(CPU_FTR_TM_COMP))
2288     + mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
2289     + else
2290     + mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
2291     + }
2292    
2293     /* Set IR and DR in PACA MSR */
2294     get_paca()->kernel_msr = MSR_KERNEL;
2295     diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
2296     index 729f02df8290..aaa28fd918fe 100644
2297     --- a/arch/powerpc/mm/hash_native_64.c
2298     +++ b/arch/powerpc/mm/hash_native_64.c
2299     @@ -115,6 +115,8 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
2300     tlbiel_hash_set_isa300(0, is, 0, 2, 1);
2301    
2302     asm volatile("ptesync": : :"memory");
2303     +
2304     + asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
2305     }
2306    
2307     void hash__tlbiel_all(unsigned int action)
2308     @@ -140,8 +142,6 @@ void hash__tlbiel_all(unsigned int action)
2309     tlbiel_all_isa206(POWER7_TLB_SETS, is);
2310     else
2311     WARN(1, "%s called on pre-POWER7 CPU\n", __func__);
2312     -
2313     - asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
2314     }
2315    
2316     static inline unsigned long ___tlbie(unsigned long vpn, int psize,
2317     diff --git a/arch/s390/defconfig b/arch/s390/defconfig
2318     index f40600eb1762..5134c71a4937 100644
2319     --- a/arch/s390/defconfig
2320     +++ b/arch/s390/defconfig
2321     @@ -221,7 +221,6 @@ CONFIG_CRYPTO_SALSA20=m
2322     CONFIG_CRYPTO_SEED=m
2323     CONFIG_CRYPTO_SERPENT=m
2324     CONFIG_CRYPTO_SM4=m
2325     -CONFIG_CRYPTO_SPECK=m
2326     CONFIG_CRYPTO_TEA=m
2327     CONFIG_CRYPTO_TWOFISH=m
2328     CONFIG_CRYPTO_DEFLATE=m
2329     diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c
2330     index 0859cde36f75..888cc2f166db 100644
2331     --- a/arch/s390/kernel/sthyi.c
2332     +++ b/arch/s390/kernel/sthyi.c
2333     @@ -183,17 +183,19 @@ static void fill_hdr(struct sthyi_sctns *sctns)
2334     static void fill_stsi_mac(struct sthyi_sctns *sctns,
2335     struct sysinfo_1_1_1 *sysinfo)
2336     {
2337     + sclp_ocf_cpc_name_copy(sctns->mac.infmname);
2338     + if (*(u64 *)sctns->mac.infmname != 0)
2339     + sctns->mac.infmval1 |= MAC_NAME_VLD;
2340     +
2341     if (stsi(sysinfo, 1, 1, 1))
2342     return;
2343    
2344     - sclp_ocf_cpc_name_copy(sctns->mac.infmname);
2345     -
2346     memcpy(sctns->mac.infmtype, sysinfo->type, sizeof(sctns->mac.infmtype));
2347     memcpy(sctns->mac.infmmanu, sysinfo->manufacturer, sizeof(sctns->mac.infmmanu));
2348     memcpy(sctns->mac.infmpman, sysinfo->plant, sizeof(sctns->mac.infmpman));
2349     memcpy(sctns->mac.infmseq, sysinfo->sequence, sizeof(sctns->mac.infmseq));
2350    
2351     - sctns->mac.infmval1 |= MAC_ID_VLD | MAC_NAME_VLD;
2352     + sctns->mac.infmval1 |= MAC_ID_VLD;
2353     }
2354    
2355     static void fill_stsi_par(struct sthyi_sctns *sctns,
2356     diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
2357     index 1458b1700fc7..8b4c5e001157 100644
2358     --- a/arch/x86/boot/compressed/eboot.c
2359     +++ b/arch/x86/boot/compressed/eboot.c
2360     @@ -738,6 +738,7 @@ efi_main(struct efi_config *c, struct boot_params *boot_params)
2361     struct desc_struct *desc;
2362     void *handle;
2363     efi_system_table_t *_table;
2364     + unsigned long cmdline_paddr;
2365    
2366     efi_early = c;
2367    
2368     @@ -755,6 +756,15 @@ efi_main(struct efi_config *c, struct boot_params *boot_params)
2369     else
2370     setup_boot_services32(efi_early);
2371    
2372     + /*
2373     + * make_boot_params() may have been called before efi_main(), in which
2374     + * case this is the second time we parse the cmdline. This is fine, as
2375     + * parsing the cmdline multiple times has no side effects.
2376     + */
2377     + cmdline_paddr = ((u64)hdr->cmd_line_ptr |
2378     + ((u64)boot_params->ext_cmd_line_ptr << 32));
2379     + efi_parse_options((char *)cmdline_paddr);
2380     +
2381     /*
2382     * If the boot loader gave us a value for secure_boot then we use that,
2383     * otherwise we ask the BIOS.
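The added block in efi_main() above rebuilds the command line's physical address from the two halves the boot protocol keeps it in: the low 32 bits in hdr->cmd_line_ptr and the high 32 bits in boot_params->ext_cmd_line_ptr. A minimal sketch of that recombination with plain stand-in parameters:

    #include <stdint.h>
    #include <stdio.h>

    /* Rebuild a 64-bit physical address from the split boot_params fields. */
    static uint64_t cmdline_paddr(uint32_t cmd_line_ptr, uint32_t ext_cmd_line_ptr)
    {
        return (uint64_t)cmd_line_ptr | ((uint64_t)ext_cmd_line_ptr << 32);
    }

    int main(void)
    {
        /* e.g. low half 0x0009f000, high half 0x1 -> 0x10009f000 */
        printf("0x%llx\n", (unsigned long long)cmdline_paddr(0x0009f000, 0x1));
        return 0;
    }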
2384     diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
2385     index d4e6cd4577e5..bf0e82400358 100644
2386     --- a/arch/x86/boot/tools/build.c
2387     +++ b/arch/x86/boot/tools/build.c
2388     @@ -391,6 +391,13 @@ int main(int argc, char ** argv)
2389     die("Unable to mmap '%s': %m", argv[2]);
2390     /* Number of 16-byte paragraphs, including space for a 4-byte CRC */
2391     sys_size = (sz + 15 + 4) / 16;
2392     +#ifdef CONFIG_EFI_STUB
2393     + /*
2394     + * COFF requires minimum 32-byte alignment of sections, and
2395     + * adding a signature is problematic without that alignment.
2396     + */
2397     + sys_size = (sys_size + 1) & ~1;
2398     +#endif
2399    
2400     /* Patch the setup code with the appropriate size parameters */
2401     buf[0x1f1] = setup_sectors-1;
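Since sys_size above counts 16-byte paragraphs (with room for the 4-byte CRC), rounding it up to an even value, as the new CONFIG_EFI_STUB hunk does with (sys_size + 1) & ~1, is exactly what yields the 32-byte section alignment the comment says COFF needs. A small arithmetic check with an example size:

    #include <stdio.h>

    int main(void)
    {
        unsigned long sz = 123456;                      /* example image size in bytes */
        unsigned long sys_size = (sz + 15 + 4) / 16;    /* 16-byte paragraphs + CRC room */
        unsigned long aligned  = (sys_size + 1) & ~1UL; /* round up to an even count */

        printf("paragraphs: %lu -> %lu (%lu bytes, a multiple of 32)\n",
               sys_size, aligned, aligned * 16);
        return 0;
    }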
2402     diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
2403     index acbe7e8336d8..e4b78f962874 100644
2404     --- a/arch/x86/crypto/aesni-intel_glue.c
2405     +++ b/arch/x86/crypto/aesni-intel_glue.c
2406     @@ -817,7 +817,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
2407     /* Linearize assoc, if not already linear */
2408     if (req->src->length >= assoclen && req->src->length &&
2409     (!PageHighMem(sg_page(req->src)) ||
2410     - req->src->offset + req->src->length < PAGE_SIZE)) {
2411     + req->src->offset + req->src->length <= PAGE_SIZE)) {
2412     scatterwalk_start(&assoc_sg_walk, req->src);
2413     assoc = scatterwalk_map(&assoc_sg_walk);
2414     } else {
2415     diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
2416     index 09b2e3e2cf1b..1c09a0d1771f 100644
2417     --- a/arch/x86/include/asm/kvm_host.h
2418     +++ b/arch/x86/include/asm/kvm_host.h
2419     @@ -177,6 +177,7 @@ enum {
2420    
2421     #define DR6_BD (1 << 13)
2422     #define DR6_BS (1 << 14)
2423     +#define DR6_BT (1 << 15)
2424     #define DR6_RTM (1 << 16)
2425     #define DR6_FIXED_1 0xfffe0ff0
2426     #define DR6_INIT 0xffff0ff0
2427     diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
2428     index 58ce5288878e..0e2130d8d6b1 100644
2429     --- a/arch/x86/include/asm/tlbflush.h
2430     +++ b/arch/x86/include/asm/tlbflush.h
2431     @@ -469,6 +469,12 @@ static inline void __native_flush_tlb_one_user(unsigned long addr)
2432     */
2433     static inline void __flush_tlb_all(void)
2434     {
2435     + /*
2436     + * This is to catch users with preemption enabled who, thanks to the PGE
2437     + * feature, would never trigger the warning in __native_flush_tlb().
2438     + */
2439     + VM_WARN_ON_ONCE(preemptible());
2440     +
2441     if (boot_cpu_has(X86_FEATURE_PGE)) {
2442     __flush_tlb_global();
2443     } else {
2444     diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
2445     index 33399426793e..cc8258a5378b 100644
2446     --- a/arch/x86/kernel/check.c
2447     +++ b/arch/x86/kernel/check.c
2448     @@ -31,6 +31,11 @@ static __init int set_corruption_check(char *arg)
2449     ssize_t ret;
2450     unsigned long val;
2451    
2452     + if (!arg) {
2453     + pr_err("memory_corruption_check config string not provided\n");
2454     + return -EINVAL;
2455     + }
2456     +
2457     ret = kstrtoul(arg, 10, &val);
2458     if (ret)
2459     return ret;
2460     @@ -45,6 +50,11 @@ static __init int set_corruption_check_period(char *arg)
2461     ssize_t ret;
2462     unsigned long val;
2463    
2464     + if (!arg) {
2465     + pr_err("memory_corruption_check_period config string not provided\n");
2466     + return -EINVAL;
2467     + }
2468     +
2469     ret = kstrtoul(arg, 10, &val);
2470     if (ret)
2471     return ret;
2472     @@ -59,6 +69,11 @@ static __init int set_corruption_check_size(char *arg)
2473     char *end;
2474     unsigned size;
2475    
2476     + if (!arg) {
2477     + pr_err("memory_corruption_check_size config string not provided\n");
2478     + return -EINVAL;
2479     + }
2480     +
2481     size = memparse(arg, &end);
2482    
2483     if (*end == '\0')
2484     diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
2485     index 40bdaea97fe7..53eb14a65610 100644
2486     --- a/arch/x86/kernel/cpu/bugs.c
2487     +++ b/arch/x86/kernel/cpu/bugs.c
2488     @@ -35,12 +35,10 @@ static void __init spectre_v2_select_mitigation(void);
2489     static void __init ssb_select_mitigation(void);
2490     static void __init l1tf_select_mitigation(void);
2491    
2492     -/*
2493     - * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
2494     - * writes to SPEC_CTRL contain whatever reserved bits have been set.
2495     - */
2496     -u64 __ro_after_init x86_spec_ctrl_base;
2497     +/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
2498     +u64 x86_spec_ctrl_base;
2499     EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
2500     +static DEFINE_MUTEX(spec_ctrl_mutex);
2501    
2502     /*
2503     * The vendor and possibly platform specific bits which can be modified in
2504     @@ -325,6 +323,46 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
2505     return cmd;
2506     }
2507    
2508     +static bool stibp_needed(void)
2509     +{
2510     + if (spectre_v2_enabled == SPECTRE_V2_NONE)
2511     + return false;
2512     +
2513     + if (!boot_cpu_has(X86_FEATURE_STIBP))
2514     + return false;
2515     +
2516     + return true;
2517     +}
2518     +
2519     +static void update_stibp_msr(void *info)
2520     +{
2521     + wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
2522     +}
2523     +
2524     +void arch_smt_update(void)
2525     +{
2526     + u64 mask;
2527     +
2528     + if (!stibp_needed())
2529     + return;
2530     +
2531     + mutex_lock(&spec_ctrl_mutex);
2532     + mask = x86_spec_ctrl_base;
2533     + if (cpu_smt_control == CPU_SMT_ENABLED)
2534     + mask |= SPEC_CTRL_STIBP;
2535     + else
2536     + mask &= ~SPEC_CTRL_STIBP;
2537     +
2538     + if (mask != x86_spec_ctrl_base) {
2539     + pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
2540     + cpu_smt_control == CPU_SMT_ENABLED ?
2541     + "Enabling" : "Disabling");
2542     + x86_spec_ctrl_base = mask;
2543     + on_each_cpu(update_stibp_msr, NULL, 1);
2544     + }
2545     + mutex_unlock(&spec_ctrl_mutex);
2546     +}
2547     +
2548     static void __init spectre_v2_select_mitigation(void)
2549     {
2550     enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
2551     @@ -424,6 +462,9 @@ specv2_set_mode:
2552     setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
2553     pr_info("Enabling Restricted Speculation for firmware calls\n");
2554     }
2555     +
2556     + /* Enable STIBP if appropriate */
2557     + arch_smt_update();
2558     }
2559    
2560     #undef pr_fmt
2561     @@ -814,6 +855,8 @@ static ssize_t l1tf_show_state(char *buf)
2562     static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
2563     char *buf, unsigned int bug)
2564     {
2565     + int ret;
2566     +
2567     if (!boot_cpu_has_bug(bug))
2568     return sprintf(buf, "Not affected\n");
2569    
2570     @@ -831,10 +874,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
2571     return sprintf(buf, "Mitigation: __user pointer sanitization\n");
2572    
2573     case X86_BUG_SPECTRE_V2:
2574     - return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
2575     + ret = sprintf(buf, "%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
2576     boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
2577     boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
2578     + (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
2579     spectre_v2_module_string());
2580     + return ret;
2581    
2582     case X86_BUG_SPEC_STORE_BYPASS:
2583     return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
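The new arch_smt_update() above only rewrites MSR_IA32_SPEC_CTRL on every CPU when toggling SMT actually changes the STIBP bit in the cached x86_spec_ctrl_base value. The heart of that is a set/clear-and-compare on the cached mask; a hedged userspace sketch, with the STIBP bit position assumed rather than taken from the patch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SPEC_CTRL_STIBP (1ULL << 1)   /* assumed bit position, illustration only */

    static uint64_t spec_ctrl_base;

    /* Return true if the cached base changed and would have to be written
     * back to every CPU (the on_each_cpu() step in the hunk above). */
    static bool smt_update(bool smt_enabled)
    {
        uint64_t mask = spec_ctrl_base;

        if (smt_enabled)
            mask |= SPEC_CTRL_STIBP;
        else
            mask &= ~SPEC_CTRL_STIBP;

        if (mask == spec_ctrl_base)
            return false;
        spec_ctrl_base = mask;
        return true;
    }

    int main(void)
    {
        printf("enable SMT:  update=%d\n", smt_update(true));
        printf("enable SMT:  update=%d\n", smt_update(true));   /* no change */
        printf("disable SMT: update=%d\n", smt_update(false));
        return 0;
    }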
2584     diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
2585     index b140c68bc14b..643670fb8943 100644
2586     --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
2587     +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
2588     @@ -2805,6 +2805,13 @@ static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
2589     {
2590     if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
2591     seq_puts(seq, ",cdp");
2592     +
2593     + if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
2594     + seq_puts(seq, ",cdpl2");
2595     +
2596     + if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA]))
2597     + seq_puts(seq, ",mba_MBps");
2598     +
2599     return 0;
2600     }
2601    
2602     diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
2603     index eaf02f2e7300..40b16b270656 100644
2604     --- a/arch/x86/kernel/kprobes/opt.c
2605     +++ b/arch/x86/kernel/kprobes/opt.c
2606     @@ -179,7 +179,7 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
2607     opt_pre_handler(&op->kp, regs);
2608     __this_cpu_write(current_kprobe, NULL);
2609     }
2610     - preempt_enable_no_resched();
2611     + preempt_enable();
2612     }
2613     NOKPROBE_SYMBOL(optimized_callback);
2614    
2615     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
2616     index e665aa7167cf..9f3def7baa6d 100644
2617     --- a/arch/x86/kvm/vmx.c
2618     +++ b/arch/x86/kvm/vmx.c
2619     @@ -3294,10 +3294,13 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit
2620     }
2621     } else {
2622     if (vmcs12->exception_bitmap & (1u << nr)) {
2623     - if (nr == DB_VECTOR)
2624     + if (nr == DB_VECTOR) {
2625     *exit_qual = vcpu->arch.dr6;
2626     - else
2627     + *exit_qual &= ~(DR6_FIXED_1 | DR6_BT);
2628     + *exit_qual ^= DR6_RTM;
2629     + } else {
2630     *exit_qual = 0;
2631     + }
2632     return 1;
2633     }
2634     }
2635     @@ -14010,13 +14013,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
2636     if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
2637     return -EINVAL;
2638    
2639     - if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12))
2640     - return -EINVAL;
2641     -
2642     - if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
2643     - !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
2644     - return -EINVAL;
2645     -
2646     if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
2647     (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
2648     return -EINVAL;
2649     @@ -14046,6 +14042,14 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
2650     if (ret)
2651     return ret;
2652    
2653     + /* Empty 'VMXON' state is permitted */
2654     + if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12))
2655     + return 0;
2656     +
2657     + if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
2658     + !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
2659     + return -EINVAL;
2660     +
2661     set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
2662    
2663     if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
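For the #DB case above, the exit qualification handed to the nested hypervisor is derived from the raw DR6 value by dropping DR6_FIXED_1 and DR6_BT and flipping DR6_RTM. Replaying exactly that transformation, using the bit definitions visible in the kvm_host.h hunk earlier in this patch:

    #include <stdio.h>

    /* Values as shown in the kvm_host.h hunk above. */
    #define DR6_BT      (1UL << 15)
    #define DR6_RTM     (1UL << 16)
    #define DR6_FIXED_1 0xfffe0ff0UL

    static unsigned long db_exit_qual(unsigned long dr6)
    {
        unsigned long qual = dr6;

        qual &= ~(DR6_FIXED_1 | DR6_BT);   /* drop the always-one bits and BT */
        qual ^= DR6_RTM;                   /* flip RTM, as the hunk above does */
        return qual;
    }

    int main(void)
    {
        unsigned long dr6 = 0xffff0ff0UL | 0x1;   /* DR6_INIT with B0 set */
        printf("dr6=0x%lx -> exit_qual=0x%lx\n", dr6, db_exit_qual(dr6));
        return 0;
    }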
2664     diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
2665     index b54d52a2d00a..d71d72cf6c66 100644
2666     --- a/arch/x86/mm/numa_emulation.c
2667     +++ b/arch/x86/mm/numa_emulation.c
2668     @@ -400,9 +400,17 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
2669     n = simple_strtoul(emu_cmdline, &emu_cmdline, 0);
2670     ret = -1;
2671     for_each_node_mask(i, physnode_mask) {
2672     + /*
2673     + * The reason we pass in blk[0] is that
2674     + * numa_remove_memblk_from(), called by
2675     + * emu_setup_memblk(), will delete entry 0
2676     + * and then move everything else up in the pi.blk
2677     + * array. Therefore we should always be looking
2678     + * at blk[0].
2679     + */
2680     ret = split_nodes_size_interleave_uniform(&ei, &pi,
2681     - pi.blk[i].start, pi.blk[i].end, 0,
2682     - n, &pi.blk[i], nid);
2683     + pi.blk[0].start, pi.blk[0].end, 0,
2684     + n, &pi.blk[0], nid);
2685     if (ret < 0)
2686     break;
2687     if (ret < n) {
2688     diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
2689     index 51a5a69ecac9..e2d4b25c7aa4 100644
2690     --- a/arch/x86/mm/pageattr.c
2691     +++ b/arch/x86/mm/pageattr.c
2692     @@ -2086,9 +2086,13 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
2693    
2694     /*
2695     * We should perform an IPI and flush all tlbs,
2696     - * but that can deadlock->flush only current cpu:
2697     + * but that can deadlock->flush only current cpu.
2698     + * Preemption needs to be disabled around __flush_tlb_all() due to
2699     + * CR3 reload in __native_flush_tlb().
2700     */
2701     + preempt_disable();
2702     __flush_tlb_all();
2703     + preempt_enable();
2704    
2705     arch_flush_lazy_mmu_mode();
2706     }
2707     diff --git a/arch/x86/platform/olpc/olpc-xo1-rtc.c b/arch/x86/platform/olpc/olpc-xo1-rtc.c
2708     index a2b4efddd61a..8e7ddd7e313a 100644
2709     --- a/arch/x86/platform/olpc/olpc-xo1-rtc.c
2710     +++ b/arch/x86/platform/olpc/olpc-xo1-rtc.c
2711     @@ -16,6 +16,7 @@
2712    
2713     #include <asm/msr.h>
2714     #include <asm/olpc.h>
2715     +#include <asm/x86_init.h>
2716    
2717     static void rtc_wake_on(struct device *dev)
2718     {
2719     @@ -75,6 +76,8 @@ static int __init xo1_rtc_init(void)
2720     if (r)
2721     return r;
2722    
2723     + x86_platform.legacy.rtc = 0;
2724     +
2725     device_init_wakeup(&xo1_rtc_device.dev, 1);
2726     return 0;
2727     }
2728     diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
2729     index c85d1a88f476..f7f77023288a 100644
2730     --- a/arch/x86/xen/enlighten_pvh.c
2731     +++ b/arch/x86/xen/enlighten_pvh.c
2732     @@ -75,7 +75,7 @@ static void __init init_pvh_bootparams(void)
2733     * Version 2.12 supports Xen entry point but we will use default x86/PC
2734     * environment (i.e. hardware_subarch 0).
2735     */
2736     - pvh_bootparams.hdr.version = 0x212;
2737     + pvh_bootparams.hdr.version = (2 << 8) | 12;
2738     pvh_bootparams.hdr.type_of_loader = (9 << 4) | 0; /* Xen loader */
2739    
2740     x86_init.acpi.get_root_pointer = pvh_get_root_pointer;
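The one-liner above is a genuine fix rather than a cleanup: the boot protocol encodes the version with the major number in the high byte and the minor in the low byte, so version 2.12 is (2 << 8) | 12 = 0x20c, while the old literal 0x212 actually decodes as 2.18. A two-line check:

    #include <stdio.h>

    int main(void)
    {
        unsigned int v = (2 << 8) | 12;                          /* version 2.12 */
        printf("0x%03x -> %u.%u\n", v, v >> 8, v & 0xffu);       /* 0x20c -> 2.12 */
        printf("0x212 -> %u.%u\n", 0x212u >> 8, 0x212u & 0xffu); /* 2.18 */
        return 0;
    }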
2741     diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
2742     index 33a783c77d96..184b36922397 100644
2743     --- a/arch/x86/xen/platform-pci-unplug.c
2744     +++ b/arch/x86/xen/platform-pci-unplug.c
2745     @@ -146,6 +146,10 @@ void xen_unplug_emulated_devices(void)
2746     {
2747     int r;
2748    
2749     + /* PVH guests don't have emulated devices. */
2750     + if (xen_pvh_domain())
2751     + return;
2752     +
2753     /* user explicitly requested no unplug */
2754     if (xen_emul_unplug & XEN_UNPLUG_NEVER)
2755     return;
2756     diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
2757     index 973f10e05211..717b4847b473 100644
2758     --- a/arch/x86/xen/spinlock.c
2759     +++ b/arch/x86/xen/spinlock.c
2760     @@ -9,6 +9,7 @@
2761     #include <linux/log2.h>
2762     #include <linux/gfp.h>
2763     #include <linux/slab.h>
2764     +#include <linux/atomic.h>
2765    
2766     #include <asm/paravirt.h>
2767     #include <asm/qspinlock.h>
2768     @@ -21,6 +22,7 @@
2769    
2770     static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
2771     static DEFINE_PER_CPU(char *, irq_name);
2772     +static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
2773     static bool xen_pvspin = true;
2774    
2775     static void xen_qlock_kick(int cpu)
2776     @@ -40,33 +42,24 @@ static void xen_qlock_kick(int cpu)
2777     static void xen_qlock_wait(u8 *byte, u8 val)
2778     {
2779     int irq = __this_cpu_read(lock_kicker_irq);
2780     + atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
2781    
2782     /* If kicker interrupts not initialized yet, just spin */
2783     - if (irq == -1)
2784     + if (irq == -1 || in_nmi())
2785     return;
2786    
2787     - /* clear pending */
2788     - xen_clear_irq_pending(irq);
2789     - barrier();
2790     -
2791     - /*
2792     - * We check the byte value after clearing pending IRQ to make sure
2793     - * that we won't miss a wakeup event because of the clearing.
2794     - *
2795     - * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
2796     - * So it is effectively a memory barrier for x86.
2797     - */
2798     - if (READ_ONCE(*byte) != val)
2799     - return;
2800     + /* Detect reentry. */
2801     + atomic_inc(nest_cnt);
2802    
2803     - /*
2804     - * If an interrupt happens here, it will leave the wakeup irq
2805     - * pending, which will cause xen_poll_irq() to return
2806     - * immediately.
2807     - */
2808     + /* If irq pending already and no nested call clear it. */
2809     + if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
2810     + xen_clear_irq_pending(irq);
2811     + } else if (READ_ONCE(*byte) == val) {
2812     + /* Block until irq becomes pending (or a spurious wakeup) */
2813     + xen_poll_irq(irq);
2814     + }
2815    
2816     - /* Block until irq becomes pending (or perhaps a spurious wakeup) */
2817     - xen_poll_irq(irq);
2818     + atomic_dec(nest_cnt);
2819     }
2820    
2821     static irqreturn_t dummy_handler(int irq, void *dev_id)
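The reworked xen_qlock_wait() above guards against re-entry on the same CPU with a per-CPU nesting counter (and bails out of NMI context entirely), so a nested call neither consumes a pending kick it does not own nor blocks in xen_poll_irq(). Reduced to plain C with stand-in state, the shape of that guard is roughly:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the per-CPU counter and event-channel state above. */
    static int  nest_cnt;
    static bool irq_pending;

    static void qlock_wait(volatile unsigned char *byte, unsigned char val)
    {
        nest_cnt++;                        /* detect re-entry on this CPU */

        if (nest_cnt == 1 && irq_pending)
            irq_pending = false;           /* outermost call: consume stale kick */
        else if (*byte == val)
            printf("would poll the kicker irq\n");  /* xen_poll_irq() in the patch */

        nest_cnt--;
    }

    int main(void)
    {
        volatile unsigned char lockbyte = 1;

        irq_pending = true;
        qlock_wait(&lockbyte, 1);   /* consumes the stale pending kick */
        qlock_wait(&lockbyte, 1);   /* nothing pending -> would poll   */
        return 0;
    }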
2822     diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S
2823     index ca2d3b2bf2af..58722a052f9c 100644
2824     --- a/arch/x86/xen/xen-pvh.S
2825     +++ b/arch/x86/xen/xen-pvh.S
2826     @@ -181,7 +181,7 @@ canary:
2827     .fill 48, 1, 0
2828    
2829     early_stack:
2830     - .fill 256, 1, 0
2831     + .fill BOOT_STACK_SIZE, 1, 0
2832     early_stack_end:
2833    
2834     ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
2835     diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
2836     index ae52bff43ce4..ff7c2d470bb8 100644
2837     --- a/block/bfq-wf2q.c
2838     +++ b/block/bfq-wf2q.c
2839     @@ -1181,10 +1181,17 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
2840     st = bfq_entity_service_tree(entity);
2841     is_in_service = entity == sd->in_service_entity;
2842    
2843     - if (is_in_service) {
2844     - bfq_calc_finish(entity, entity->service);
2845     + bfq_calc_finish(entity, entity->service);
2846     +
2847     + if (is_in_service)
2848     sd->in_service_entity = NULL;
2849     - }
2850     + else
2851     + /*
2852     + * Non in-service entity: nobody will take care of
2853     + * resetting its service counter on expiration. Do it
2854     + * now.
2855     + */
2856     + entity->service = 0;
2857    
2858     if (entity->tree == &st->active)
2859     bfq_active_extract(st, entity);
2860     diff --git a/block/blk-lib.c b/block/blk-lib.c
2861     index bbd44666f2b5..1f196cf0aa5d 100644
2862     --- a/block/blk-lib.c
2863     +++ b/block/blk-lib.c
2864     @@ -58,8 +58,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
2865    
2866     if (!req_sects)
2867     goto fail;
2868     - if (req_sects > UINT_MAX >> 9)
2869     - req_sects = UINT_MAX >> 9;
2870     + req_sects = min(req_sects, bio_allowed_max_sectors(q));
2871    
2872     end_sect = sector + req_sects;
2873    
2874     @@ -162,7 +161,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
2875     return -EOPNOTSUPP;
2876    
2877     /* Ensure that max_write_same_sectors doesn't overflow bi_size */
2878     - max_write_same_sectors = UINT_MAX >> 9;
2879     + max_write_same_sectors = bio_allowed_max_sectors(q);
2880    
2881     while (nr_sects) {
2882     bio = next_bio(bio, 1, gfp_mask);
2883     diff --git a/block/blk-merge.c b/block/blk-merge.c
2884     index aaec38cc37b8..2e042190a4f1 100644
2885     --- a/block/blk-merge.c
2886     +++ b/block/blk-merge.c
2887     @@ -27,7 +27,8 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
2888     /* Zero-sector (unknown) and one-sector granularities are the same. */
2889     granularity = max(q->limits.discard_granularity >> 9, 1U);
2890    
2891     - max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
2892     + max_discard_sectors = min(q->limits.max_discard_sectors,
2893     + bio_allowed_max_sectors(q));
2894     max_discard_sectors -= max_discard_sectors % granularity;
2895    
2896     if (unlikely(!max_discard_sectors)) {
2897     diff --git a/block/blk.h b/block/blk.h
2898     index 9db4e389582c..977d4b5d968d 100644
2899     --- a/block/blk.h
2900     +++ b/block/blk.h
2901     @@ -328,6 +328,16 @@ static inline unsigned long blk_rq_deadline(struct request *rq)
2902     return rq->__deadline & ~0x1UL;
2903     }
2904    
2905     +/*
2906     + * The max size one bio can handle is UINT_MAX, because bvec_iter.bi_size
2907     + * is defined as 'unsigned int'.  It also has to be aligned to the logical
2908     + * block size, which is the minimum unit accepted by the hardware.
2909     + */
2910     +static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
2911     +{
2912     + return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
2913     +}
2914     +
2915     /*
2916     * Internal io_context interface
2917     */
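The helper added above caps a bio at the largest byte count that both fits in bvec_iter.bi_size (an unsigned int) and is a whole number of logical blocks, then converts that to 512-byte sectors. A quick arithmetic sketch for a 4096-byte logical block size:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int lbs = 4096;                              /* logical block size */
        unsigned int max_bytes = UINT_MAX - (UINT_MAX % lbs); /* round_down(UINT_MAX, lbs) */
        unsigned int max_sectors = max_bytes >> 9;            /* 512-byte sectors */

        printf("max bytes:   %u\n", max_bytes);    /* 4294963200 */
        printf("max sectors: %u\n", max_sectors);  /* 8388600   */
        return 0;
    }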
2918     diff --git a/block/bounce.c b/block/bounce.c
2919     index bc63b3a2d18c..418677dcec60 100644
2920     --- a/block/bounce.c
2921     +++ b/block/bounce.c
2922     @@ -31,6 +31,24 @@
2923     static struct bio_set bounce_bio_set, bounce_bio_split;
2924     static mempool_t page_pool, isa_page_pool;
2925    
2926     +static void init_bounce_bioset(void)
2927     +{
2928     + static bool bounce_bs_setup;
2929     + int ret;
2930     +
2931     + if (bounce_bs_setup)
2932     + return;
2933     +
2934     + ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
2935     + BUG_ON(ret);
2936     + if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE))
2937     + BUG_ON(1);
2938     +
2939     + ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0);
2940     + BUG_ON(ret);
2941     + bounce_bs_setup = true;
2942     +}
2943     +
2944     #if defined(CONFIG_HIGHMEM)
2945     static __init int init_emergency_pool(void)
2946     {
2947     @@ -44,14 +62,7 @@ static __init int init_emergency_pool(void)
2948     BUG_ON(ret);
2949     pr_info("pool size: %d pages\n", POOL_SIZE);
2950    
2951     - ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
2952     - BUG_ON(ret);
2953     - if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE))
2954     - BUG_ON(1);
2955     -
2956     - ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0);
2957     - BUG_ON(ret);
2958     -
2959     + init_bounce_bioset();
2960     return 0;
2961     }
2962    
2963     @@ -86,6 +97,8 @@ static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
2964     return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
2965     }
2966    
2967     +static DEFINE_MUTEX(isa_mutex);
2968     +
2969     /*
2970     * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
2971     * as the max address, so check if the pool has already been created.
2972     @@ -94,14 +107,20 @@ int init_emergency_isa_pool(void)
2973     {
2974     int ret;
2975    
2976     - if (mempool_initialized(&isa_page_pool))
2977     + mutex_lock(&isa_mutex);
2978     +
2979     + if (mempool_initialized(&isa_page_pool)) {
2980     + mutex_unlock(&isa_mutex);
2981     return 0;
2982     + }
2983    
2984     ret = mempool_init(&isa_page_pool, ISA_POOL_SIZE, mempool_alloc_pages_isa,
2985     mempool_free_pages, (void *) 0);
2986     BUG_ON(ret);
2987    
2988     pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
2989     + init_bounce_bioset();
2990     + mutex_unlock(&isa_mutex);
2991     return 0;
2992     }
2993    
2994     diff --git a/crypto/Kconfig b/crypto/Kconfig
2995     index f3e40ac56d93..59e32623a7ce 100644
2996     --- a/crypto/Kconfig
2997     +++ b/crypto/Kconfig
2998     @@ -1590,20 +1590,6 @@ config CRYPTO_SM4
2999    
3000     If unsure, say N.
3001    
3002     -config CRYPTO_SPECK
3003     - tristate "Speck cipher algorithm"
3004     - select CRYPTO_ALGAPI
3005     - help
3006     - Speck is a lightweight block cipher that is tuned for optimal
3007     - performance in software (rather than hardware).
3008     -
3009     - Speck may not be as secure as AES, and should only be used on systems
3010     - where AES is not fast enough.
3011     -
3012     - See also: <https://eprint.iacr.org/2013/404.pdf>
3013     -
3014     - If unsure, say N.
3015     -
3016     config CRYPTO_TEA
3017     tristate "TEA, XTEA and XETA cipher algorithms"
3018     select CRYPTO_ALGAPI
3019     diff --git a/crypto/Makefile b/crypto/Makefile
3020     index 6d1d40eeb964..f6a234d08882 100644
3021     --- a/crypto/Makefile
3022     +++ b/crypto/Makefile
3023     @@ -115,7 +115,6 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
3024     obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
3025     obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
3026     obj-$(CONFIG_CRYPTO_SEED) += seed.o
3027     -obj-$(CONFIG_CRYPTO_SPECK) += speck.o
3028     obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
3029     obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o
3030     obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
3031     diff --git a/crypto/aegis.h b/crypto/aegis.h
3032     index f1c6900ddb80..405e025fc906 100644
3033     --- a/crypto/aegis.h
3034     +++ b/crypto/aegis.h
3035     @@ -21,7 +21,7 @@
3036    
3037     union aegis_block {
3038     __le64 words64[AEGIS_BLOCK_SIZE / sizeof(__le64)];
3039     - u32 words32[AEGIS_BLOCK_SIZE / sizeof(u32)];
3040     + __le32 words32[AEGIS_BLOCK_SIZE / sizeof(__le32)];
3041     u8 bytes[AEGIS_BLOCK_SIZE];
3042     };
3043    
3044     @@ -57,24 +57,22 @@ static void crypto_aegis_aesenc(union aegis_block *dst,
3045     const union aegis_block *src,
3046     const union aegis_block *key)
3047     {
3048     - u32 *d = dst->words32;
3049     const u8 *s = src->bytes;
3050     - const u32 *k = key->words32;
3051     const u32 *t0 = crypto_ft_tab[0];
3052     const u32 *t1 = crypto_ft_tab[1];
3053     const u32 *t2 = crypto_ft_tab[2];
3054     const u32 *t3 = crypto_ft_tab[3];
3055     u32 d0, d1, d2, d3;
3056    
3057     - d0 = t0[s[ 0]] ^ t1[s[ 5]] ^ t2[s[10]] ^ t3[s[15]] ^ k[0];
3058     - d1 = t0[s[ 4]] ^ t1[s[ 9]] ^ t2[s[14]] ^ t3[s[ 3]] ^ k[1];
3059     - d2 = t0[s[ 8]] ^ t1[s[13]] ^ t2[s[ 2]] ^ t3[s[ 7]] ^ k[2];
3060     - d3 = t0[s[12]] ^ t1[s[ 1]] ^ t2[s[ 6]] ^ t3[s[11]] ^ k[3];
3061     + d0 = t0[s[ 0]] ^ t1[s[ 5]] ^ t2[s[10]] ^ t3[s[15]];
3062     + d1 = t0[s[ 4]] ^ t1[s[ 9]] ^ t2[s[14]] ^ t3[s[ 3]];
3063     + d2 = t0[s[ 8]] ^ t1[s[13]] ^ t2[s[ 2]] ^ t3[s[ 7]];
3064     + d3 = t0[s[12]] ^ t1[s[ 1]] ^ t2[s[ 6]] ^ t3[s[11]];
3065    
3066     - d[0] = d0;
3067     - d[1] = d1;
3068     - d[2] = d2;
3069     - d[3] = d3;
3070     + dst->words32[0] = cpu_to_le32(d0) ^ key->words32[0];
3071     + dst->words32[1] = cpu_to_le32(d1) ^ key->words32[1];
3072     + dst->words32[2] = cpu_to_le32(d2) ^ key->words32[2];
3073     + dst->words32[3] = cpu_to_le32(d3) ^ key->words32[3];
3074     }
3075    
3076     #endif /* _CRYPTO_AEGIS_H */
3077     diff --git a/crypto/lrw.c b/crypto/lrw.c
3078     index 393a782679c7..5504d1325a56 100644
3079     --- a/crypto/lrw.c
3080     +++ b/crypto/lrw.c
3081     @@ -143,7 +143,12 @@ static inline int get_index128(be128 *block)
3082     return x + ffz(val);
3083     }
3084    
3085     - return x;
3086     + /*
3087     + * If we get here, then x == 128 and we are incrementing the counter
3088     + * from all ones to all zeros. This means we must return index 127, i.e.
3089     + * the one corresponding to key2*{ 1,...,1 }.
3090     + */
3091     + return 127;
3092     }
3093    
3094     static int post_crypt(struct skcipher_request *req)
3095     diff --git a/crypto/morus1280.c b/crypto/morus1280.c
3096     index d057cf5ac4a8..3889c188f266 100644
3097     --- a/crypto/morus1280.c
3098     +++ b/crypto/morus1280.c
3099     @@ -385,14 +385,11 @@ static void crypto_morus1280_final(struct morus1280_state *state,
3100     struct morus1280_block *tag_xor,
3101     u64 assoclen, u64 cryptlen)
3102     {
3103     - u64 assocbits = assoclen * 8;
3104     - u64 cryptbits = cryptlen * 8;
3105     -
3106     struct morus1280_block tmp;
3107     unsigned int i;
3108    
3109     - tmp.words[0] = cpu_to_le64(assocbits);
3110     - tmp.words[1] = cpu_to_le64(cryptbits);
3111     + tmp.words[0] = assoclen * 8;
3112     + tmp.words[1] = cryptlen * 8;
3113     tmp.words[2] = 0;
3114     tmp.words[3] = 0;
3115    
3116     diff --git a/crypto/morus640.c b/crypto/morus640.c
3117     index 1ca76e54281b..da06ec2f6a80 100644
3118     --- a/crypto/morus640.c
3119     +++ b/crypto/morus640.c
3120     @@ -384,21 +384,13 @@ static void crypto_morus640_final(struct morus640_state *state,
3121     struct morus640_block *tag_xor,
3122     u64 assoclen, u64 cryptlen)
3123     {
3124     - u64 assocbits = assoclen * 8;
3125     - u64 cryptbits = cryptlen * 8;
3126     -
3127     - u32 assocbits_lo = (u32)assocbits;
3128     - u32 assocbits_hi = (u32)(assocbits >> 32);
3129     - u32 cryptbits_lo = (u32)cryptbits;
3130     - u32 cryptbits_hi = (u32)(cryptbits >> 32);
3131     -
3132     struct morus640_block tmp;
3133     unsigned int i;
3134    
3135     - tmp.words[0] = cpu_to_le32(assocbits_lo);
3136     - tmp.words[1] = cpu_to_le32(assocbits_hi);
3137     - tmp.words[2] = cpu_to_le32(cryptbits_lo);
3138     - tmp.words[3] = cpu_to_le32(cryptbits_hi);
3139     + tmp.words[0] = lower_32_bits(assoclen * 8);
3140     + tmp.words[1] = upper_32_bits(assoclen * 8);
3141     + tmp.words[2] = lower_32_bits(cryptlen * 8);
3142     + tmp.words[3] = upper_32_bits(cryptlen * 8);
3143    
3144     for (i = 0; i < MORUS_BLOCK_WORDS; i++)
3145     state->s[4].words[i] ^= state->s[0].words[i];
3146     diff --git a/crypto/speck.c b/crypto/speck.c
3147     deleted file mode 100644
3148     index 58aa9f7f91f7..000000000000
3149     --- a/crypto/speck.c
3150     +++ /dev/null
3151     @@ -1,307 +0,0 @@
3152     -// SPDX-License-Identifier: GPL-2.0
3153     -/*
3154     - * Speck: a lightweight block cipher
3155     - *
3156     - * Copyright (c) 2018 Google, Inc
3157     - *
3158     - * Speck has 10 variants, including 5 block sizes. For now we only implement
3159     - * the variants Speck128/128, Speck128/192, Speck128/256, Speck64/96, and
3160     - * Speck64/128. Speck${B}/${K} denotes the variant with a block size of B bits
3161     - * and a key size of K bits. The Speck128 variants are believed to be the most
3162     - * secure variants, and they use the same block size and key sizes as AES. The
3163     - * Speck64 variants are less secure, but on 32-bit processors are usually
3164     - * faster. The remaining variants (Speck32, Speck48, and Speck96) are even less
3165     - * secure and/or not as well suited for implementation on either 32-bit or
3166     - * 64-bit processors, so are omitted.
3167     - *
3168     - * Reference: "The Simon and Speck Families of Lightweight Block Ciphers"
3169     - * https://eprint.iacr.org/2013/404.pdf
3170     - *
3171     - * In a correspondence, the Speck designers have also clarified that the words
3172     - * should be interpreted in little-endian format, and the words should be
3173     - * ordered such that the first word of each block is 'y' rather than 'x', and
3174     - * the first key word (rather than the last) becomes the first round key.
3175     - */
3176     -
3177     -#include <asm/unaligned.h>
3178     -#include <crypto/speck.h>
3179     -#include <linux/bitops.h>
3180     -#include <linux/crypto.h>
3181     -#include <linux/init.h>
3182     -#include <linux/module.h>
3183     -
3184     -/* Speck128 */
3185     -
3186     -static __always_inline void speck128_round(u64 *x, u64 *y, u64 k)
3187     -{
3188     - *x = ror64(*x, 8);
3189     - *x += *y;
3190     - *x ^= k;
3191     - *y = rol64(*y, 3);
3192     - *y ^= *x;
3193     -}
3194     -
3195     -static __always_inline void speck128_unround(u64 *x, u64 *y, u64 k)
3196     -{
3197     - *y ^= *x;
3198     - *y = ror64(*y, 3);
3199     - *x ^= k;
3200     - *x -= *y;
3201     - *x = rol64(*x, 8);
3202     -}
3203     -
3204     -void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
3205     - u8 *out, const u8 *in)
3206     -{
3207     - u64 y = get_unaligned_le64(in);
3208     - u64 x = get_unaligned_le64(in + 8);
3209     - int i;
3210     -
3211     - for (i = 0; i < ctx->nrounds; i++)
3212     - speck128_round(&x, &y, ctx->round_keys[i]);
3213     -
3214     - put_unaligned_le64(y, out);
3215     - put_unaligned_le64(x, out + 8);
3216     -}
3217     -EXPORT_SYMBOL_GPL(crypto_speck128_encrypt);
3218     -
3219     -static void speck128_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
3220     -{
3221     - crypto_speck128_encrypt(crypto_tfm_ctx(tfm), out, in);
3222     -}
3223     -
3224     -void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
3225     - u8 *out, const u8 *in)
3226     -{
3227     - u64 y = get_unaligned_le64(in);
3228     - u64 x = get_unaligned_le64(in + 8);
3229     - int i;
3230     -
3231     - for (i = ctx->nrounds - 1; i >= 0; i--)
3232     - speck128_unround(&x, &y, ctx->round_keys[i]);
3233     -
3234     - put_unaligned_le64(y, out);
3235     - put_unaligned_le64(x, out + 8);
3236     -}
3237     -EXPORT_SYMBOL_GPL(crypto_speck128_decrypt);
3238     -
3239     -static void speck128_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
3240     -{
3241     - crypto_speck128_decrypt(crypto_tfm_ctx(tfm), out, in);
3242     -}
3243     -
3244     -int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
3245     - unsigned int keylen)
3246     -{
3247     - u64 l[3];
3248     - u64 k;
3249     - int i;
3250     -
3251     - switch (keylen) {
3252     - case SPECK128_128_KEY_SIZE:
3253     - k = get_unaligned_le64(key);
3254     - l[0] = get_unaligned_le64(key + 8);
3255     - ctx->nrounds = SPECK128_128_NROUNDS;
3256     - for (i = 0; i < ctx->nrounds; i++) {
3257     - ctx->round_keys[i] = k;
3258     - speck128_round(&l[0], &k, i);
3259     - }
3260     - break;
3261     - case SPECK128_192_KEY_SIZE:
3262     - k = get_unaligned_le64(key);
3263     - l[0] = get_unaligned_le64(key + 8);
3264     - l[1] = get_unaligned_le64(key + 16);
3265     - ctx->nrounds = SPECK128_192_NROUNDS;
3266     - for (i = 0; i < ctx->nrounds; i++) {
3267     - ctx->round_keys[i] = k;
3268     - speck128_round(&l[i % 2], &k, i);
3269     - }
3270     - break;
3271     - case SPECK128_256_KEY_SIZE:
3272     - k = get_unaligned_le64(key);
3273     - l[0] = get_unaligned_le64(key + 8);
3274     - l[1] = get_unaligned_le64(key + 16);
3275     - l[2] = get_unaligned_le64(key + 24);
3276     - ctx->nrounds = SPECK128_256_NROUNDS;
3277     - for (i = 0; i < ctx->nrounds; i++) {
3278     - ctx->round_keys[i] = k;
3279     - speck128_round(&l[i % 3], &k, i);
3280     - }
3281     - break;
3282     - default:
3283     - return -EINVAL;
3284     - }
3285     -
3286     - return 0;
3287     -}
3288     -EXPORT_SYMBOL_GPL(crypto_speck128_setkey);
3289     -
3290     -static int speck128_setkey(struct crypto_tfm *tfm, const u8 *key,
3291     - unsigned int keylen)
3292     -{
3293     - return crypto_speck128_setkey(crypto_tfm_ctx(tfm), key, keylen);
3294     -}
3295     -
3296     -/* Speck64 */
3297     -
3298     -static __always_inline void speck64_round(u32 *x, u32 *y, u32 k)
3299     -{
3300     - *x = ror32(*x, 8);
3301     - *x += *y;
3302     - *x ^= k;
3303     - *y = rol32(*y, 3);
3304     - *y ^= *x;
3305     -}
3306     -
3307     -static __always_inline void speck64_unround(u32 *x, u32 *y, u32 k)
3308     -{
3309     - *y ^= *x;
3310     - *y = ror32(*y, 3);
3311     - *x ^= k;
3312     - *x -= *y;
3313     - *x = rol32(*x, 8);
3314     -}
3315     -
3316     -void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
3317     - u8 *out, const u8 *in)
3318     -{
3319     - u32 y = get_unaligned_le32(in);
3320     - u32 x = get_unaligned_le32(in + 4);
3321     - int i;
3322     -
3323     - for (i = 0; i < ctx->nrounds; i++)
3324     - speck64_round(&x, &y, ctx->round_keys[i]);
3325     -
3326     - put_unaligned_le32(y, out);
3327     - put_unaligned_le32(x, out + 4);
3328     -}
3329     -EXPORT_SYMBOL_GPL(crypto_speck64_encrypt);
3330     -
3331     -static void speck64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
3332     -{
3333     - crypto_speck64_encrypt(crypto_tfm_ctx(tfm), out, in);
3334     -}
3335     -
3336     -void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
3337     - u8 *out, const u8 *in)
3338     -{
3339     - u32 y = get_unaligned_le32(in);
3340     - u32 x = get_unaligned_le32(in + 4);
3341     - int i;
3342     -
3343     - for (i = ctx->nrounds - 1; i >= 0; i--)
3344     - speck64_unround(&x, &y, ctx->round_keys[i]);
3345     -
3346     - put_unaligned_le32(y, out);
3347     - put_unaligned_le32(x, out + 4);
3348     -}
3349     -EXPORT_SYMBOL_GPL(crypto_speck64_decrypt);
3350     -
3351     -static void speck64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
3352     -{
3353     - crypto_speck64_decrypt(crypto_tfm_ctx(tfm), out, in);
3354     -}
3355     -
3356     -int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
3357     - unsigned int keylen)
3358     -{
3359     - u32 l[3];
3360     - u32 k;
3361     - int i;
3362     -
3363     - switch (keylen) {
3364     - case SPECK64_96_KEY_SIZE:
3365     - k = get_unaligned_le32(key);
3366     - l[0] = get_unaligned_le32(key + 4);
3367     - l[1] = get_unaligned_le32(key + 8);
3368     - ctx->nrounds = SPECK64_96_NROUNDS;
3369     - for (i = 0; i < ctx->nrounds; i++) {
3370     - ctx->round_keys[i] = k;
3371     - speck64_round(&l[i % 2], &k, i);
3372     - }
3373     - break;
3374     - case SPECK64_128_KEY_SIZE:
3375     - k = get_unaligned_le32(key);
3376     - l[0] = get_unaligned_le32(key + 4);
3377     - l[1] = get_unaligned_le32(key + 8);
3378     - l[2] = get_unaligned_le32(key + 12);
3379     - ctx->nrounds = SPECK64_128_NROUNDS;
3380     - for (i = 0; i < ctx->nrounds; i++) {
3381     - ctx->round_keys[i] = k;
3382     - speck64_round(&l[i % 3], &k, i);
3383     - }
3384     - break;
3385     - default:
3386     - return -EINVAL;
3387     - }
3388     -
3389     - return 0;
3390     -}
3391     -EXPORT_SYMBOL_GPL(crypto_speck64_setkey);
3392     -
3393     -static int speck64_setkey(struct crypto_tfm *tfm, const u8 *key,
3394     - unsigned int keylen)
3395     -{
3396     - return crypto_speck64_setkey(crypto_tfm_ctx(tfm), key, keylen);
3397     -}
3398     -
3399     -/* Algorithm definitions */
3400     -
3401     -static struct crypto_alg speck_algs[] = {
3402     - {
3403     - .cra_name = "speck128",
3404     - .cra_driver_name = "speck128-generic",
3405     - .cra_priority = 100,
3406     - .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
3407     - .cra_blocksize = SPECK128_BLOCK_SIZE,
3408     - .cra_ctxsize = sizeof(struct speck128_tfm_ctx),
3409     - .cra_module = THIS_MODULE,
3410     - .cra_u = {
3411     - .cipher = {
3412     - .cia_min_keysize = SPECK128_128_KEY_SIZE,
3413     - .cia_max_keysize = SPECK128_256_KEY_SIZE,
3414     - .cia_setkey = speck128_setkey,
3415     - .cia_encrypt = speck128_encrypt,
3416     - .cia_decrypt = speck128_decrypt
3417     - }
3418     - }
3419     - }, {
3420     - .cra_name = "speck64",
3421     - .cra_driver_name = "speck64-generic",
3422     - .cra_priority = 100,
3423     - .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
3424     - .cra_blocksize = SPECK64_BLOCK_SIZE,
3425     - .cra_ctxsize = sizeof(struct speck64_tfm_ctx),
3426     - .cra_module = THIS_MODULE,
3427     - .cra_u = {
3428     - .cipher = {
3429     - .cia_min_keysize = SPECK64_96_KEY_SIZE,
3430     - .cia_max_keysize = SPECK64_128_KEY_SIZE,
3431     - .cia_setkey = speck64_setkey,
3432     - .cia_encrypt = speck64_encrypt,
3433     - .cia_decrypt = speck64_decrypt
3434     - }
3435     - }
3436     - }
3437     -};
3438     -
3439     -static int __init speck_module_init(void)
3440     -{
3441     - return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs));
3442     -}
3443     -
3444     -static void __exit speck_module_exit(void)
3445     -{
3446     - crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs));
3447     -}
3448     -
3449     -module_init(speck_module_init);
3450     -module_exit(speck_module_exit);
3451     -
3452     -MODULE_DESCRIPTION("Speck block cipher (generic)");
3453     -MODULE_LICENSE("GPL");
3454     -MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
3455     -MODULE_ALIAS_CRYPTO("speck128");
3456     -MODULE_ALIAS_CRYPTO("speck128-generic");
3457     -MODULE_ALIAS_CRYPTO("speck64");
3458     -MODULE_ALIAS_CRYPTO("speck64-generic");
3459     diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
3460     index bdde95e8d369..6e0a054bb61d 100644
3461     --- a/crypto/tcrypt.c
3462     +++ b/crypto/tcrypt.c
3463     @@ -1103,6 +1103,9 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
3464     break;
3465     }
3466    
3467     + if (speed[i].klen)
3468     + crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);
3469     +
3470     pr_info("test%3u "
3471     "(%5u byte blocks,%5u bytes per update,%4u updates): ",
3472     i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
3473     diff --git a/crypto/testmgr.c b/crypto/testmgr.c
3474     index a1d42245082a..1c9bf38e59ea 100644
3475     --- a/crypto/testmgr.c
3476     +++ b/crypto/testmgr.c
3477     @@ -3037,18 +3037,6 @@ static const struct alg_test_desc alg_test_descs[] = {
3478     .suite = {
3479     .cipher = __VECS(sm4_tv_template)
3480     }
3481     - }, {
3482     - .alg = "ecb(speck128)",
3483     - .test = alg_test_skcipher,
3484     - .suite = {
3485     - .cipher = __VECS(speck128_tv_template)
3486     - }
3487     - }, {
3488     - .alg = "ecb(speck64)",
3489     - .test = alg_test_skcipher,
3490     - .suite = {
3491     - .cipher = __VECS(speck64_tv_template)
3492     - }
3493     }, {
3494     .alg = "ecb(tea)",
3495     .test = alg_test_skcipher,
3496     @@ -3576,18 +3564,6 @@ static const struct alg_test_desc alg_test_descs[] = {
3497     .suite = {
3498     .cipher = __VECS(serpent_xts_tv_template)
3499     }
3500     - }, {
3501     - .alg = "xts(speck128)",
3502     - .test = alg_test_skcipher,
3503     - .suite = {
3504     - .cipher = __VECS(speck128_xts_tv_template)
3505     - }
3506     - }, {
3507     - .alg = "xts(speck64)",
3508     - .test = alg_test_skcipher,
3509     - .suite = {
3510     - .cipher = __VECS(speck64_xts_tv_template)
3511     - }
3512     }, {
3513     .alg = "xts(twofish)",
3514     .test = alg_test_skcipher,
3515     diff --git a/crypto/testmgr.h b/crypto/testmgr.h
3516     index 173111c70746..0b3d7cadbe93 100644
3517     --- a/crypto/testmgr.h
3518     +++ b/crypto/testmgr.h
3519     @@ -10198,744 +10198,6 @@ static const struct cipher_testvec sm4_tv_template[] = {
3520     }
3521     };
3522    
3523     -/*
3524     - * Speck test vectors taken from the original paper:
3525     - * "The Simon and Speck Families of Lightweight Block Ciphers"
3526     - * https://eprint.iacr.org/2013/404.pdf
3527     - *
3528     - * Note that the paper does not make byte and word order clear. But it was
3529     - * confirmed with the authors that the intended orders are little endian byte
3530     - * order and (y, x) word order. Equivalently, the printed test vectors, when
3531     - * looking at only the bytes (ignoring the whitespace that divides them into
3532     - * words), are backwards: the left-most byte is actually the one with the
3533     - * highest memory address, while the right-most byte is actually the one with
3534     - * the lowest memory address.
3535     - */
3536     -
3537     -static const struct cipher_testvec speck128_tv_template[] = {
3538     - { /* Speck128/128 */
3539     - .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
3540     - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
3541     - .klen = 16,
3542     - .ptext = "\x20\x6d\x61\x64\x65\x20\x69\x74"
3543     - "\x20\x65\x71\x75\x69\x76\x61\x6c",
3544     - .ctext = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78"
3545     - "\x65\x32\x78\x79\x51\x98\x5d\xa6",
3546     - .len = 16,
3547     - }, { /* Speck128/192 */
3548     - .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
3549     - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
3550     - "\x10\x11\x12\x13\x14\x15\x16\x17",
3551     - .klen = 24,
3552     - .ptext = "\x65\x6e\x74\x20\x74\x6f\x20\x43"
3553     - "\x68\x69\x65\x66\x20\x48\x61\x72",
3554     - .ctext = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9"
3555     - "\x66\x55\x13\x13\x3a\xcf\xe4\x1b",
3556     - .len = 16,
3557     - }, { /* Speck128/256 */
3558     - .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
3559     - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
3560     - "\x10\x11\x12\x13\x14\x15\x16\x17"
3561     - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
3562     - .klen = 32,
3563     - .ptext = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20"
3564     - "\x49\x6e\x20\x74\x68\x6f\x73\x65",
3565     - .ctext = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e"
3566     - "\x3e\xf5\xc0\x05\x04\x01\x09\x41",
3567     - .len = 16,
3568     - },
3569     -};
3570     -
3571     -/*
3572     - * Speck128-XTS test vectors, taken from the AES-XTS test vectors with the
3573     - * ciphertext recomputed with Speck128 as the cipher
3574     - */
3575     -static const struct cipher_testvec speck128_xts_tv_template[] = {
3576     - {
3577     - .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
3578     - "\x00\x00\x00\x00\x00\x00\x00\x00"
3579     - "\x00\x00\x00\x00\x00\x00\x00\x00"
3580     - "\x00\x00\x00\x00\x00\x00\x00\x00",
3581     - .klen = 32,
3582     - .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3583     - "\x00\x00\x00\x00\x00\x00\x00\x00",
3584     - .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
3585     - "\x00\x00\x00\x00\x00\x00\x00\x00"
3586     - "\x00\x00\x00\x00\x00\x00\x00\x00"
3587     - "\x00\x00\x00\x00\x00\x00\x00\x00",
3588     - .ctext = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62"
3589     - "\x3b\x99\x4a\x64\x74\x77\xac\xed"
3590     - "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42"
3591     - "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54",
3592     - .len = 32,
3593     - }, {
3594     - .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
3595     - "\x11\x11\x11\x11\x11\x11\x11\x11"
3596     - "\x22\x22\x22\x22\x22\x22\x22\x22"
3597     - "\x22\x22\x22\x22\x22\x22\x22\x22",
3598     - .klen = 32,
3599     - .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
3600     - "\x00\x00\x00\x00\x00\x00\x00\x00",
3601     - .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
3602     - "\x44\x44\x44\x44\x44\x44\x44\x44"
3603     - "\x44\x44\x44\x44\x44\x44\x44\x44"
3604     - "\x44\x44\x44\x44\x44\x44\x44\x44",
3605     - .ctext = "\xfb\x53\x81\x75\x6f\x9f\x34\xad"
3606     - "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a"
3607     - "\xd4\x84\xa4\x53\xd5\x88\x73\x1b"
3608     - "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6",
3609     - .len = 32,
3610     - }, {
3611     - .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
3612     - "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
3613     - "\x22\x22\x22\x22\x22\x22\x22\x22"
3614     - "\x22\x22\x22\x22\x22\x22\x22\x22",
3615     - .klen = 32,
3616     - .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
3617     - "\x00\x00\x00\x00\x00\x00\x00\x00",
3618     - .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
3619     - "\x44\x44\x44\x44\x44\x44\x44\x44"
3620     - "\x44\x44\x44\x44\x44\x44\x44\x44"
3621     - "\x44\x44\x44\x44\x44\x44\x44\x44",
3622     - .ctext = "\x21\x52\x84\x15\xd1\xf7\x21\x55"
3623     - "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d"
3624     - "\xda\x63\xb2\xf1\x82\xb0\x89\x59"
3625     - "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92",
3626     - .len = 32,
3627     - }, {
3628     - .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
3629     - "\x23\x53\x60\x28\x74\x71\x35\x26"
3630     - "\x31\x41\x59\x26\x53\x58\x97\x93"
3631     - "\x23\x84\x62\x64\x33\x83\x27\x95",
3632     - .klen = 32,
3633     - .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3634     - "\x00\x00\x00\x00\x00\x00\x00\x00",
3635     - .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
3636     - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
3637     - "\x10\x11\x12\x13\x14\x15\x16\x17"
3638     - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
3639     - "\x20\x21\x22\x23\x24\x25\x26\x27"
3640     - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
3641     - "\x30\x31\x32\x33\x34\x35\x36\x37"
3642     - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
3643     - "\x40\x41\x42\x43\x44\x45\x46\x47"
3644     - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
3645     - "\x50\x51\x52\x53\x54\x55\x56\x57"
3646     - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
3647     - "\x60\x61\x62\x63\x64\x65\x66\x67"
3648     - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
3649     - "\x70\x71\x72\x73\x74\x75\x76\x77"
3650     - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
3651     - "\x80\x81\x82\x83\x84\x85\x86\x87"
3652     - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
3653     - "\x90\x91\x92\x93\x94\x95\x96\x97"
3654     - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
3655     - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
3656     - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
3657     - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
3658     - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
3659     - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
3660     - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
3661     - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
3662     - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
3663     - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
3664     - "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
3665     - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
3666     - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
3667     - "\x00\x01\x02\x03\x04\x05\x06\x07"
3668     - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
3669     - "\x10\x11\x12\x13\x14\x15\x16\x17"
3670     - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
3671     - "\x20\x21\x22\x23\x24\x25\x26\x27"
3672     - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
3673     - "\x30\x31\x32\x33\x34\x35\x36\x37"
3674     - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
3675     - "\x40\x41\x42\x43\x44\x45\x46\x47"
3676     - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
3677     - "\x50\x51\x52\x53\x54\x55\x56\x57"
3678     - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
3679     - "\x60\x61\x62\x63\x64\x65\x66\x67"
3680     - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
3681     - "\x70\x71\x72\x73\x74\x75\x76\x77"
3682     - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
3683     - "\x80\x81\x82\x83\x84\x85\x86\x87"
3684     - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
3685     - "\x90\x91\x92\x93\x94\x95\x96\x97"
3686     - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
3687     - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
3688     - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
3689     - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
3690     - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
3691     - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
3692     - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
3693     - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
3694     - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
3695     - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
3696     - "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
3697     - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
3698     - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
3699     - .ctext = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82"
3700     - "\x53\xd0\xed\x2d\x30\xc1\x20\xef"
3701     - "\x70\x67\x5e\xff\x09\x70\xbb\xc1"
3702     - "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48"
3703     - "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7"
3704     - "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9"
3705     - "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44"
3706     - "\x19\xc5\x58\x84\x63\xb9\x12\x68"
3707     - "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c"
3708     - "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd"
3709     - "\x74\x79\x2e\xb4\x44\xd7\x69\xc4"
3710     - "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d"
3711     - "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb"
3712     - "\x6d\x13\x65\xa0\xf9\x31\x12\xe2"
3713     - "\x26\xd1\xec\x2b\x0a\x8b\x59\x99"
3714     - "\xa7\x49\xa0\x0e\x09\x33\x85\x50"
3715     - "\xc3\x23\xca\x7a\xdd\x13\x45\x5f"
3716     - "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f"
3717     - "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6"
3718     - "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f"
3719     - "\x79\x91\x8d\x36\x13\x7b\xd0\x4a"
3720     - "\x6c\x39\xfb\x53\xb8\x6f\x02\x51"
3721     - "\xa5\x20\xac\x24\x1c\x73\x59\x73"
3722     - "\x58\x61\x3a\x87\x58\xb3\x20\x56"
3723     - "\x39\x06\x2b\x4d\xd3\x20\x2b\x89"
3724     - "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd"
3725     - "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91"
3726     - "\x09\x35\x71\x50\x65\xac\x92\xe3"
3727     - "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92"
3728     - "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9"
3729     - "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d"
3730     - "\x77\x04\x80\xa9\xbf\x38\xb5\xbd"
3731     - "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8"
3732     - "\x2a\x26\xcc\x49\x14\x6d\x55\x01"
3733     - "\x06\x94\xd8\xb2\x2d\x53\x83\x1b"
3734     - "\x8f\xd4\xdd\x57\x12\x7e\x18\xba"
3735     - "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d"
3736     - "\x24\xa9\x60\xa4\x97\x85\x86\x2a"
3737     - "\x01\x00\x09\xf1\xcb\x4a\x24\x1c"
3738     - "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4"
3739     - "\x97\x1c\x10\xc6\x4d\x66\x4f\x98"
3740     - "\x87\x30\xac\xd5\xea\x73\x49\x10"
3741     - "\x80\xea\xe5\x5f\x4d\x5f\x03\x33"
3742     - "\x66\x02\x35\x3d\x60\x06\x36\x4f"
3743     - "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8"
3744     - "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28"
3745     - "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93"
3746     - "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30"
3747     - "\xcc\x75\xcf\x16\x26\xa9\x26\x3b"
3748     - "\xe7\x68\x2f\x15\x21\x5b\xe4\x00"
3749     - "\xbd\x48\x50\xcd\x75\x70\xc4\x62"
3750     - "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b"
3751     - "\x51\x66\x02\x69\x04\x97\x36\xd4"
3752     - "\x75\xae\x0b\xa3\x42\xf8\xca\x79"
3753     - "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2"
3754     - "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd"
3755     - "\xea\x15\x5a\xa0\x85\x7e\x81\x0d"
3756     - "\x03\xe7\x05\x39\xf5\x05\x26\xee"
3757     - "\xec\xaa\x1f\x3d\xc9\x98\x76\x01"
3758     - "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4"
3759     - "\x50\x65\x50\x6d\x04\x1f\xdf\x5a"
3760     - "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca"
3761     - "\x47\x26\xef\x39\xb8\xb4\xf2\xd1"
3762     - "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf",
3763     - .len = 512,
3764     - }, {
3765     - .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
3766     - "\x23\x53\x60\x28\x74\x71\x35\x26"
3767     - "\x62\x49\x77\x57\x24\x70\x93\x69"
3768     - "\x99\x59\x57\x49\x66\x96\x76\x27"
3769     - "\x31\x41\x59\x26\x53\x58\x97\x93"
3770     - "\x23\x84\x62\x64\x33\x83\x27\x95"
3771     - "\x02\x88\x41\x97\x16\x93\x99\x37"
3772     - "\x51\x05\x82\x09\x74\x94\x45\x92",
3773     - .klen = 64,
3774     - .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
3775     - "\x00\x00\x00\x00\x00\x00\x00\x00",
3776     - .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
3777     - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
3778     - "\x10\x11\x12\x13\x14\x15\x16\x17"
3779     - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
3780     - "\x20\x21\x22\x23\x24\x25\x26\x27"
3781     - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
3782     - "\x30\x31\x32\x33\x34\x35\x36\x37"
3783     - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
3784     - "\x40\x41\x42\x43\x44\x45\x46\x47"
3785     - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
3786     - "\x50\x51\x52\x53\x54\x55\x56\x57"
3787     - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
3788     - "\x60\x61\x62\x63\x64\x65\x66\x67"
3789     - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
3790     - "\x70\x71\x72\x73\x74\x75\x76\x77"
3791     - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
3792     - "\x80\x81\x82\x83\x84\x85\x86\x87"
3793     - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
3794     - "\x90\x91\x92\x93\x94\x95\x96\x97"
3795     - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
3796     - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
3797     - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
3798     - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
3799     - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
3800     - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
3801     - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
3802     - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
3803     - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
3804     - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
3805     - "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
3806     - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
3807     - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
3808     - "\x00\x01\x02\x03\x04\x05\x06\x07"
3809     - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
3810     - "\x10\x11\x12\x13\x14\x15\x16\x17"
3811     - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
3812     - "\x20\x21\x22\x23\x24\x25\x26\x27"
3813     - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
3814     - "\x30\x31\x32\x33\x34\x35\x36\x37"
3815     - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
3816     - "\x40\x41\x42\x43\x44\x45\x46\x47"
3817     - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
3818     - "\x50\x51\x52\x53\x54\x55\x56\x57"
3819     - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
3820     - "\x60\x61\x62\x63\x64\x65\x66\x67"
3821     - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
3822     - "\x70\x71\x72\x73\x74\x75\x76\x77"
3823     - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
3824     - "\x80\x81\x82\x83\x84\x85\x86\x87"
3825     - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
3826     - "\x90\x91\x92\x93\x94\x95\x96\x97"
3827     - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
3828     - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
3829     - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
3830     - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
3831     - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
3832     - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
3833     - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
3834     - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
3835     - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
3836     - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
3837     - "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
3838     - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
3839     - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
3840     - .ctext = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1"
3841     - "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb"
3842     - "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73"
3843     - "\x92\x99\xde\xd3\x76\xed\xcd\x63"
3844     - "\x64\x3a\x22\x57\xc1\x43\x49\xd4"
3845     - "\x79\x36\x31\x19\x62\xae\x10\x7e"
3846     - "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa"
3847     - "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0"
3848     - "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00"
3849     - "\xfc\x81\x99\x8a\x14\x62\xf5\x7e"
3850     - "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec"
3851     - "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6"
3852     - "\x62\x62\x37\xfe\x0a\x4c\x4a\x37"
3853     - "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e"
3854     - "\x85\x3c\x4f\x26\x64\x85\xbc\x68"
3855     - "\xb0\xe0\x86\x5e\x26\x41\xce\x11"
3856     - "\x50\xda\x97\x14\xe9\x9e\xc7\x6d"
3857     - "\x3b\xdc\x43\xde\x2b\x27\x69\x7d"
3858     - "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31"
3859     - "\x14\x4d\xf0\x74\x37\xfd\x07\x25"
3860     - "\x96\x55\xe5\xfc\x9e\x27\x2a\x74"
3861     - "\x1b\x83\x4d\x15\x83\xac\x57\xa0"
3862     - "\xac\xa5\xd0\x38\xef\x19\x56\x53"
3863     - "\x25\x4b\xfc\xce\x04\x23\xe5\x6b"
3864     - "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5"
3865     - "\xed\x22\x34\x1c\x5d\xed\x17\x06"
3866     - "\x36\xa3\xe6\x77\xb9\x97\x46\xb8"
3867     - "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc"
3868     - "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82"
3869     - "\x35\x91\x3d\x1b\xe4\x97\x9f\x92"
3870     - "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1"
3871     - "\x8d\x39\xfc\x42\xfb\x38\x80\xb9"
3872     - "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1"
3873     - "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7"
3874     - "\xa1\xbf\xf7\xda\x95\x93\x4b\x78"
3875     - "\x19\xf5\x94\xf9\xd2\x00\x33\x37"
3876     - "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee"
3877     - "\x42\xb2\x9e\x2c\x5f\x48\x23\x26"
3878     - "\x15\x25\x17\x03\x3d\xfe\x2c\xfc"
3879     - "\xeb\xba\xda\xe0\x00\x05\xb6\xa6"
3880     - "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf"
3881     - "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a"
3882     - "\x49\xa1\xc3\xfa\x10\x52\xb9\x14"
3883     - "\xad\xb7\x73\xf8\x78\x12\xc8\x59"
3884     - "\x17\x80\x4c\x57\x39\xf1\x6d\x80"
3885     - "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21"
3886     - "\xec\xce\xb7\xc8\x02\x8a\xed\x53"
3887     - "\x2c\x25\x68\x2e\x1f\x85\x5e\x67"
3888     - "\xd1\x07\x7a\x3a\x89\x08\xe0\x34"
3889     - "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40"
3890     - "\x31\x15\x72\xa0\xf0\x73\xd9\x3b"
3891     - "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2"
3892     - "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8"
3893     - "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6"
3894     - "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58"
3895     - "\xcc\x1f\x48\x49\x65\x47\x75\xe9"
3896     - "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07"
3897     - "\xf2\xec\x76\xd8\x8f\x09\xf3\x16"
3898     - "\xa1\x51\x89\x3b\xeb\x96\x42\xac"
3899     - "\x65\xe0\x67\x63\x29\xdc\xb4\x7d"
3900     - "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb"
3901     - "\x66\x8d\x13\xca\xe0\x59\x2a\x00"
3902     - "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5"
3903     - "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c",
3904     - .len = 512,
3905     - .also_non_np = 1,
3906     - .np = 3,
3907     - .tap = { 512 - 20, 4, 16 },
3908     - }
3909     -};
3910     -
3911     -static const struct cipher_testvec speck64_tv_template[] = {
3912     - { /* Speck64/96 */
3913     - .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
3914     - "\x10\x11\x12\x13",
3915     - .klen = 12,
3916     - .ptext = "\x65\x61\x6e\x73\x20\x46\x61\x74",
3917     - .ctext = "\x6c\x94\x75\x41\xec\x52\x79\x9f",
3918     - .len = 8,
3919     - }, { /* Speck64/128 */
3920     - .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
3921     - "\x10\x11\x12\x13\x18\x19\x1a\x1b",
3922     - .klen = 16,
3923     - .ptext = "\x2d\x43\x75\x74\x74\x65\x72\x3b",
3924     - .ctext = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c",
3925     - .len = 8,
3926     - },
3927     -};
3928     -
3929     -/*
3930     - * Speck64-XTS test vectors, taken from the AES-XTS test vectors with the
3931     - * ciphertext recomputed with Speck64 as the cipher, and key lengths adjusted
3932     - */
3933     -static const struct cipher_testvec speck64_xts_tv_template[] = {
3934     - {
3935     - .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
3936     - "\x00\x00\x00\x00\x00\x00\x00\x00"
3937     - "\x00\x00\x00\x00\x00\x00\x00\x00",
3938     - .klen = 24,
3939     - .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3940     - "\x00\x00\x00\x00\x00\x00\x00\x00",
3941     - .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
3942     - "\x00\x00\x00\x00\x00\x00\x00\x00"
3943     - "\x00\x00\x00\x00\x00\x00\x00\x00"
3944     - "\x00\x00\x00\x00\x00\x00\x00\x00",
3945     - .ctext = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6"
3946     - "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2"
3947     - "\x80\xf5\x72\xe7\xcd\xf0\x99\x22"
3948     - "\x35\xa7\x2f\x06\xef\xdc\x51\xaa",
3949     - .len = 32,
3950     - }, {
3951     - .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
3952     - "\x11\x11\x11\x11\x11\x11\x11\x11"
3953     - "\x22\x22\x22\x22\x22\x22\x22\x22",
3954     - .klen = 24,
3955     - .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
3956     - "\x00\x00\x00\x00\x00\x00\x00\x00",
3957     - .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
3958     - "\x44\x44\x44\x44\x44\x44\x44\x44"
3959     - "\x44\x44\x44\x44\x44\x44\x44\x44"
3960     - "\x44\x44\x44\x44\x44\x44\x44\x44",
3961     - .ctext = "\x12\x56\x73\xcd\x15\x87\xa8\x59"
3962     - "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f"
3963     - "\xb3\x12\x69\x7e\x36\xeb\x52\xff"
3964     - "\x62\xdd\xba\x90\xb3\xe1\xee\x99",
3965     - .len = 32,
3966     - }, {
3967     - .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
3968     - "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
3969     - "\x22\x22\x22\x22\x22\x22\x22\x22",
3970     - .klen = 24,
3971     - .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
3972     - "\x00\x00\x00\x00\x00\x00\x00\x00",
3973     - .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
3974     - "\x44\x44\x44\x44\x44\x44\x44\x44"
3975     - "\x44\x44\x44\x44\x44\x44\x44\x44"
3976     - "\x44\x44\x44\x44\x44\x44\x44\x44",
3977     - .ctext = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c"
3978     - "\x27\x36\xc0\xbf\x5d\xea\x36\x37"
3979     - "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b"
3980     - "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34",
3981     - .len = 32,
3982     - }, {
3983     - .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
3984     - "\x23\x53\x60\x28\x74\x71\x35\x26"
3985     - "\x31\x41\x59\x26\x53\x58\x97\x93",
3986     - .klen = 24,
3987     - .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
3988     - "\x00\x00\x00\x00\x00\x00\x00\x00",
3989     - .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
3990     - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
3991     - "\x10\x11\x12\x13\x14\x15\x16\x17"
3992     - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
3993     - "\x20\x21\x22\x23\x24\x25\x26\x27"
3994     - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
3995     - "\x30\x31\x32\x33\x34\x35\x36\x37"
3996     - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
3997     - "\x40\x41\x42\x43\x44\x45\x46\x47"
3998     - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
3999     - "\x50\x51\x52\x53\x54\x55\x56\x57"
4000     - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
4001     - "\x60\x61\x62\x63\x64\x65\x66\x67"
4002     - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
4003     - "\x70\x71\x72\x73\x74\x75\x76\x77"
4004     - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
4005     - "\x80\x81\x82\x83\x84\x85\x86\x87"
4006     - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
4007     - "\x90\x91\x92\x93\x94\x95\x96\x97"
4008     - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
4009     - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
4010     - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
4011     - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
4012     - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
4013     - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
4014     - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
4015     - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
4016     - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
4017     - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
4018     - "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
4019     - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
4020     - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
4021     - "\x00\x01\x02\x03\x04\x05\x06\x07"
4022     - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
4023     - "\x10\x11\x12\x13\x14\x15\x16\x17"
4024     - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
4025     - "\x20\x21\x22\x23\x24\x25\x26\x27"
4026     - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
4027     - "\x30\x31\x32\x33\x34\x35\x36\x37"
4028     - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
4029     - "\x40\x41\x42\x43\x44\x45\x46\x47"
4030     - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
4031     - "\x50\x51\x52\x53\x54\x55\x56\x57"
4032     - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
4033     - "\x60\x61\x62\x63\x64\x65\x66\x67"
4034     - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
4035     - "\x70\x71\x72\x73\x74\x75\x76\x77"
4036     - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
4037     - "\x80\x81\x82\x83\x84\x85\x86\x87"
4038     - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
4039     - "\x90\x91\x92\x93\x94\x95\x96\x97"
4040     - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
4041     - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
4042     - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
4043     - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
4044     - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
4045     - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
4046     - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
4047     - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
4048     - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
4049     - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
4050     - "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
4051     - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
4052     - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
4053     - .ctext = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e"
4054     - "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09"
4055     - "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3"
4056     - "\x11\xc7\x39\x96\xd0\x95\xf4\x56"
4057     - "\xf4\xdd\x03\x38\x01\x44\x2c\xcf"
4058     - "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66"
4059     - "\xfe\x3d\xc6\xfb\x01\x23\x51\x43"
4060     - "\xd5\xd2\x13\x86\x94\x34\xe9\x62"
4061     - "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef"
4062     - "\x76\x35\x04\x3f\xdb\x23\x9d\x0b"
4063     - "\x85\x42\xb9\x02\xd6\xcc\xdb\x96"
4064     - "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d"
4065     - "\xae\xd2\x04\xd5\xda\xc1\x7e\x24"
4066     - "\x8c\x73\xbe\x48\x7e\xcf\x65\x28"
4067     - "\x29\xe5\xbe\x54\x30\xcb\x46\x95"
4068     - "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe"
4069     - "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69"
4070     - "\xa1\x09\x95\x71\x26\xe9\xc4\xdf"
4071     - "\xe6\x31\xc3\x46\xda\xaf\x0b\x41"
4072     - "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3"
4073     - "\x82\xc0\x37\x27\xfc\x91\xa7\x05"
4074     - "\xfb\xc5\xdc\x2b\x74\x96\x48\x43"
4075     - "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f"
4076     - "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a"
4077     - "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c"
4078     - "\x07\xff\xf3\x72\x74\x48\xb5\x40"
4079     - "\x50\xb5\xdd\x90\x43\x31\x18\x15"
4080     - "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a"
4081     - "\x29\x93\x90\x8b\xda\x07\xf0\x35"
4082     - "\x6d\x90\x88\x09\x4e\x83\xf5\x5b"
4083     - "\x94\x12\xbb\x33\x27\x1d\x3f\x23"
4084     - "\x51\xa8\x7c\x07\xa2\xae\x77\xa6"
4085     - "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f"
4086     - "\x66\xdd\xcd\x75\x24\x8b\x33\xf7"
4087     - "\x20\xdb\x83\x9b\x4f\x11\x63\x6e"
4088     - "\xcf\x37\xef\xc9\x11\x01\x5c\x45"
4089     - "\x32\x99\x7c\x3c\x9e\x42\x89\xe3"
4090     - "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05"
4091     - "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc"
4092     - "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d"
4093     - "\xa0\xa8\x89\x3b\x73\x39\xa5\x94"
4094     - "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89"
4095     - "\x10\xff\xaf\xef\xca\xdd\x4f\x80"
4096     - "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7"
4097     - "\x33\xca\x00\x8b\x8b\x3f\xea\xec"
4098     - "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f"
4099     - "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5"
4100     - "\x64\xa3\xf1\x1a\x76\x28\xcc\x35"
4101     - "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b"
4102     - "\xc7\x1b\x53\x17\x02\xea\xd1\xad"
4103     - "\x13\x51\x73\xc0\xa0\xb2\x05\x32"
4104     - "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19"
4105     - "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d"
4106     - "\x59\xda\xee\x1a\x22\x18\xda\x0d"
4107     - "\x88\x0f\x55\x8b\x72\x62\xfd\xc1"
4108     - "\x69\x13\xcd\x0d\x5f\xc1\x09\x52"
4109     - "\xee\xd6\xe3\x84\x4d\xee\xf6\x88"
4110     - "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f"
4111     - "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54"
4112     - "\x7d\x69\x8d\x00\x62\x77\x0d\x14"
4113     - "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3"
4114     - "\x50\xf7\x5f\xf4\xc2\xca\x41\x97"
4115     - "\x37\xbe\x75\x74\xcd\xf0\x75\x6e"
4116     - "\x25\x23\x94\xbd\xda\x8d\xb0\xd4",
4117     - .len = 512,
4118     - }, {
4119     - .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
4120     - "\x23\x53\x60\x28\x74\x71\x35\x26"
4121     - "\x62\x49\x77\x57\x24\x70\x93\x69"
4122     - "\x99\x59\x57\x49\x66\x96\x76\x27",
4123     - .klen = 32,
4124     - .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
4125     - "\x00\x00\x00\x00\x00\x00\x00\x00",
4126     - .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
4127     - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
4128     - "\x10\x11\x12\x13\x14\x15\x16\x17"
4129     - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
4130     - "\x20\x21\x22\x23\x24\x25\x26\x27"
4131     - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
4132     - "\x30\x31\x32\x33\x34\x35\x36\x37"
4133     - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
4134     - "\x40\x41\x42\x43\x44\x45\x46\x47"
4135     - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
4136     - "\x50\x51\x52\x53\x54\x55\x56\x57"
4137     - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
4138     - "\x60\x61\x62\x63\x64\x65\x66\x67"
4139     - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
4140     - "\x70\x71\x72\x73\x74\x75\x76\x77"
4141     - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
4142     - "\x80\x81\x82\x83\x84\x85\x86\x87"
4143     - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
4144     - "\x90\x91\x92\x93\x94\x95\x96\x97"
4145     - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
4146     - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
4147     - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
4148     - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
4149     - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
4150     - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
4151     - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
4152     - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
4153     - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
4154     - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
4155     - "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
4156     - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
4157     - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
4158     - "\x00\x01\x02\x03\x04\x05\x06\x07"
4159     - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
4160     - "\x10\x11\x12\x13\x14\x15\x16\x17"
4161     - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
4162     - "\x20\x21\x22\x23\x24\x25\x26\x27"
4163     - "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
4164     - "\x30\x31\x32\x33\x34\x35\x36\x37"
4165     - "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
4166     - "\x40\x41\x42\x43\x44\x45\x46\x47"
4167     - "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
4168     - "\x50\x51\x52\x53\x54\x55\x56\x57"
4169     - "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
4170     - "\x60\x61\x62\x63\x64\x65\x66\x67"
4171     - "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
4172     - "\x70\x71\x72\x73\x74\x75\x76\x77"
4173     - "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
4174     - "\x80\x81\x82\x83\x84\x85\x86\x87"
4175     - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
4176     - "\x90\x91\x92\x93\x94\x95\x96\x97"
4177     - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
4178     - "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
4179     - "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
4180     - "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
4181     - "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
4182     - "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
4183     - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
4184     - "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
4185     - "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
4186     - "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
4187     - "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
4188     - "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
4189     - "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
4190     - .ctext = "\x55\xed\x71\xd3\x02\x8e\x15\x3b"
4191     - "\xc6\x71\x29\x2d\x3e\x89\x9f\x59"
4192     - "\x68\x6a\xcc\x8a\x56\x97\xf3\x95"
4193     - "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c"
4194     - "\x78\x16\xea\x80\xdb\x33\x75\x94"
4195     - "\xf9\x29\xc4\x2b\x76\x75\x97\xc7"
4196     - "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b"
4197     - "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee"
4198     - "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a"
4199     - "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c"
4200     - "\xf5\xec\x32\x74\xa3\xb8\x03\x88"
4201     - "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f"
4202     - "\x84\x5e\x46\xed\x20\x89\xb6\x44"
4203     - "\x8d\xd0\xed\x54\x47\x16\xbe\x95"
4204     - "\x8a\xb3\x6b\x72\xc4\x32\x52\x13"
4205     - "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6"
4206     - "\x44\x18\xdd\x8c\x6e\xca\x6e\x45"
4207     - "\x8f\x1e\x10\x07\x57\x25\x98\x7b"
4208     - "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8"
4209     - "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb"
4210     - "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff"
4211     - "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e"
4212     - "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d"
4213     - "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65"
4214     - "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a"
4215     - "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a"
4216     - "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78"
4217     - "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3"
4218     - "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e"
4219     - "\x35\x10\x30\x82\x0d\xe7\xc5\x9b"
4220     - "\xde\x44\x18\xbd\x9f\xd1\x45\xa9"
4221     - "\x7b\x7a\x4a\xad\x35\x65\x27\xca"
4222     - "\xb2\xc3\xd4\x9b\x71\x86\x70\xee"
4223     - "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf"
4224     - "\xfc\x42\xc8\x31\x59\xbe\x16\x60"
4225     - "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14"
4226     - "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef"
4227     - "\x52\x7f\x29\x51\x94\x20\x67\x3c"
4228     - "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63"
4229     - "\xe7\xff\x73\x25\xd1\xdd\x96\x8a"
4230     - "\x98\x52\x6d\xf3\xac\x3e\xf2\x18"
4231     - "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed"
4232     - "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e"
4233     - "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad"
4234     - "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa"
4235     - "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81"
4236     - "\x65\x53\x0f\x41\x11\xbd\x98\x99"
4237     - "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d"
4238     - "\x84\x98\xf9\x34\xed\x33\x2a\x1f"
4239     - "\x82\xed\xc1\x73\x98\xd3\x02\xdc"
4240     - "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76"
4241     - "\x63\x51\x34\x9d\x96\x12\xae\xce"
4242     - "\x83\xc9\x76\x5e\xa4\x1b\x53\x37"
4243     - "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d"
4244     - "\x54\x27\x74\xbb\x10\x86\x57\x46"
4245     - "\x68\xe1\xed\x14\xe7\x9d\xfc\x84"
4246     - "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf"
4247     - "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d"
4248     - "\x7b\x4f\x38\x55\x36\x71\x64\xc1"
4249     - "\xfc\x5c\x75\x52\x33\x02\x18\xf8"
4250     - "\x17\xe1\x2b\xc2\x43\x39\xbd\x76"
4251     - "\x9b\x63\x76\x32\x2f\x19\x72\x10"
4252     - "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5"
4253     - "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c",
4254     - .len = 512,
4255     - .also_non_np = 1,
4256     - .np = 3,
4257     - .tap = { 512 - 20, 4, 16 },
4258     - }
4259     -};
4260     -
4261     /* Cast6 test vectors from RFC 2612 */
4262     static const struct cipher_testvec cast6_tv_template[] = {
4263     {
4264     diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
4265     index cf4fc0161164..e43cb71b6972 100644
4266     --- a/drivers/acpi/acpi_lpit.c
4267     +++ b/drivers/acpi/acpi_lpit.c
4268     @@ -117,11 +117,17 @@ static void lpit_update_residency(struct lpit_residency_info *info,
4269     if (!info->iomem_addr)
4270     return;
4271    
4272     + if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
4273     + return;
4274     +
4275     /* Silently fail, if cpuidle attribute group is not present */
4276     sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj,
4277     &dev_attr_low_power_idle_system_residency_us.attr,
4278     "cpuidle");
4279     } else if (info->gaddr.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
4280     + if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
4281     + return;
4282     +
4283     /* Silently fail, if cpuidle attribute group is not present */
4284     sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj,
4285     &dev_attr_low_power_idle_cpu_residency_us.attr,
4286     diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
4287     index bf64cfa30feb..969bf8d515c0 100644
4288     --- a/drivers/acpi/acpi_lpss.c
4289     +++ b/drivers/acpi/acpi_lpss.c
4290     @@ -327,9 +327,11 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
4291     { "INT33FC", },
4292    
4293     /* Braswell LPSS devices */
4294     + { "80862286", LPSS_ADDR(lpss_dma_desc) },
4295     { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
4296     { "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
4297     { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
4298     + { "808622C0", LPSS_ADDR(lpss_dma_desc) },
4299     { "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },
4300    
4301     /* Broadwell LPSS devices */
4302     diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
4303     index 449d86d39965..fc447410ae4d 100644
4304     --- a/drivers/acpi/acpi_processor.c
4305     +++ b/drivers/acpi/acpi_processor.c
4306     @@ -643,7 +643,7 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
4307    
4308     status = acpi_get_type(handle, &acpi_type);
4309     if (ACPI_FAILURE(status))
4310     - return false;
4311     + return status;
4312    
4313     switch (acpi_type) {
4314     case ACPI_TYPE_PROCESSOR:
4315     @@ -663,11 +663,12 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
4316     }
4317    
4318     processor_validated_ids_update(uid);
4319     - return true;
4320     + return AE_OK;
4321    
4322     err:
4323     + /* Exit on error, but don't abort the namespace walk */
4324     acpi_handle_info(handle, "Invalid processor object\n");
4325     - return false;
4326     + return AE_OK;
4327    
4328     }
4329    
4330     diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
4331     index e9fb0bf3c8d2..78f9de260d5f 100644
4332     --- a/drivers/acpi/acpica/dsopcode.c
4333     +++ b/drivers/acpi/acpica/dsopcode.c
4334     @@ -417,6 +417,10 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
4335     ACPI_FORMAT_UINT64(obj_desc->region.address),
4336     obj_desc->region.length));
4337    
4338     + status = acpi_ut_add_address_range(obj_desc->region.space_id,
4339     + obj_desc->region.address,
4340     + obj_desc->region.length, node);
4341     +
4342     /* Now the address and length are valid for this opregion */
4343    
4344     obj_desc->region.flags |= AOPOBJ_DATA_VALID;
4345     diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
4346     index 34fc2f7476ed..b0789c483b0f 100644
4347     --- a/drivers/acpi/acpica/psloop.c
4348     +++ b/drivers/acpi/acpica/psloop.c
4349     @@ -417,6 +417,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
4350     union acpi_parse_object *op = NULL; /* current op */
4351     struct acpi_parse_state *parser_state;
4352     u8 *aml_op_start = NULL;
4353     + u8 opcode_length;
4354    
4355     ACPI_FUNCTION_TRACE_PTR(ps_parse_loop, walk_state);
4356    
4357     @@ -540,8 +541,19 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
4358     "Skip parsing opcode %s",
4359     acpi_ps_get_opcode_name
4360     (walk_state->opcode)));
4361     +
4362     + /*
4363     + * Determine the opcode length before skipping the opcode.
4364     + * An opcode can be 1 byte or 2 bytes in length.
4365     + */
4366     + opcode_length = 1;
4367     + if ((walk_state->opcode & 0xFF00) ==
4368     + AML_EXTENDED_OPCODE) {
4369     + opcode_length = 2;
4370     + }
4371     walk_state->parser_state.aml =
4372     - walk_state->aml + 1;
4373     + walk_state->aml + opcode_length;
4374     +
4375     walk_state->parser_state.aml =
4376     acpi_ps_get_next_package_end
4377     (&walk_state->parser_state);
4378     diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
4379     index b072cfc5f20e..19b641208d86 100644
4380     --- a/drivers/acpi/nfit/core.c
4381     +++ b/drivers/acpi/nfit/core.c
4382     @@ -2466,7 +2466,8 @@ static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
4383     return cmd_rc;
4384     }
4385    
4386     -static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
4387     +static int ars_start(struct acpi_nfit_desc *acpi_desc,
4388     + struct nfit_spa *nfit_spa, enum nfit_ars_state req_type)
4389     {
4390     int rc;
4391     int cmd_rc;
4392     @@ -2477,7 +2478,7 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa
4393     memset(&ars_start, 0, sizeof(ars_start));
4394     ars_start.address = spa->address;
4395     ars_start.length = spa->length;
4396     - if (test_bit(ARS_SHORT, &nfit_spa->ars_state))
4397     + if (req_type == ARS_REQ_SHORT)
4398     ars_start.flags = ND_ARS_RETURN_PREV_DATA;
4399     if (nfit_spa_type(spa) == NFIT_SPA_PM)
4400     ars_start.type = ND_ARS_PERSISTENT;
4401     @@ -2534,6 +2535,15 @@ static void ars_complete(struct acpi_nfit_desc *acpi_desc,
4402     struct nd_region *nd_region = nfit_spa->nd_region;
4403     struct device *dev;
4404    
4405     + lockdep_assert_held(&acpi_desc->init_mutex);
4406     + /*
4407     + * Only advance the ARS state for ARS runs initiated by the
4408     + * kernel, ignore ARS results from BIOS initiated runs for scrub
4409     + * completion tracking.
4410     + */
4411     + if (acpi_desc->scrub_spa != nfit_spa)
4412     + return;
4413     +
4414     if ((ars_status->address >= spa->address && ars_status->address
4415     < spa->address + spa->length)
4416     || (ars_status->address < spa->address)) {
4417     @@ -2553,28 +2563,13 @@ static void ars_complete(struct acpi_nfit_desc *acpi_desc,
4418     } else
4419     return;
4420    
4421     - if (test_bit(ARS_DONE, &nfit_spa->ars_state))
4422     - return;
4423     -
4424     - if (!test_and_clear_bit(ARS_REQ, &nfit_spa->ars_state))
4425     - return;
4426     -
4427     + acpi_desc->scrub_spa = NULL;
4428     if (nd_region) {
4429     dev = nd_region_dev(nd_region);
4430     nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON);
4431     } else
4432     dev = acpi_desc->dev;
4433     -
4434     - dev_dbg(dev, "ARS: range %d %s complete\n", spa->range_index,
4435     - test_bit(ARS_SHORT, &nfit_spa->ars_state)
4436     - ? "short" : "long");
4437     - clear_bit(ARS_SHORT, &nfit_spa->ars_state);
4438     - if (test_and_clear_bit(ARS_REQ_REDO, &nfit_spa->ars_state)) {
4439     - set_bit(ARS_SHORT, &nfit_spa->ars_state);
4440     - set_bit(ARS_REQ, &nfit_spa->ars_state);
4441     - dev_dbg(dev, "ARS: processing scrub request received while in progress\n");
4442     - } else
4443     - set_bit(ARS_DONE, &nfit_spa->ars_state);
4444     + dev_dbg(dev, "ARS: range %d complete\n", spa->range_index);
4445     }
4446    
4447     static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
4448     @@ -2855,46 +2850,55 @@ static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
4449     return 0;
4450     }
4451    
4452     -static int ars_register(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa,
4453     - int *query_rc)
4454     +static int ars_register(struct acpi_nfit_desc *acpi_desc,
4455     + struct nfit_spa *nfit_spa)
4456     {
4457     - int rc = *query_rc;
4458     + int rc;
4459    
4460     - if (no_init_ars)
4461     + if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state))
4462     return acpi_nfit_register_region(acpi_desc, nfit_spa);
4463    
4464     - set_bit(ARS_REQ, &nfit_spa->ars_state);
4465     - set_bit(ARS_SHORT, &nfit_spa->ars_state);
4466     + set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
4467     + set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
4468    
4469     - switch (rc) {
4470     + switch (acpi_nfit_query_poison(acpi_desc)) {
4471     case 0:
4472     case -EAGAIN:
4473     - rc = ars_start(acpi_desc, nfit_spa);
4474     - if (rc == -EBUSY) {
4475     - *query_rc = rc;
4476     + rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
4477     + /* shouldn't happen, try again later */
4478     + if (rc == -EBUSY)
4479     break;
4480     - } else if (rc == 0) {
4481     - rc = acpi_nfit_query_poison(acpi_desc);
4482     - } else {
4483     + if (rc) {
4484     set_bit(ARS_FAILED, &nfit_spa->ars_state);
4485     break;
4486     }
4487     - if (rc == -EAGAIN)
4488     - clear_bit(ARS_SHORT, &nfit_spa->ars_state);
4489     - else if (rc == 0)
4490     - ars_complete(acpi_desc, nfit_spa);
4491     + clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
4492     + rc = acpi_nfit_query_poison(acpi_desc);
4493     + if (rc)
4494     + break;
4495     + acpi_desc->scrub_spa = nfit_spa;
4496     + ars_complete(acpi_desc, nfit_spa);
4497     + /*
4498     + * If ars_complete() says we didn't complete the
4499     + * short scrub, we'll try again with a long
4500     + * request.
4501     + */
4502     + acpi_desc->scrub_spa = NULL;
4503     break;
4504     case -EBUSY:
4505     + case -ENOMEM:
4506     case -ENOSPC:
4507     + /*
4508     + * BIOS was using ARS, wait for it to complete (or
4509     + * resources to become available) and then perform our
4510     + * own scrubs.
4511     + */
4512     break;
4513     default:
4514     set_bit(ARS_FAILED, &nfit_spa->ars_state);
4515     break;
4516     }
4517    
4518     - if (test_and_clear_bit(ARS_DONE, &nfit_spa->ars_state))
4519     - set_bit(ARS_REQ, &nfit_spa->ars_state);
4520     -
4521     return acpi_nfit_register_region(acpi_desc, nfit_spa);
4522     }
4523    
4524     @@ -2916,6 +2920,8 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
4525     struct device *dev = acpi_desc->dev;
4526     struct nfit_spa *nfit_spa;
4527    
4528     + lockdep_assert_held(&acpi_desc->init_mutex);
4529     +
4530     if (acpi_desc->cancel)
4531     return 0;
4532    
4533     @@ -2939,21 +2945,49 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
4534    
4535     ars_complete_all(acpi_desc);
4536     list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
4537     + enum nfit_ars_state req_type;
4538     + int rc;
4539     +
4540     if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
4541     continue;
4542     - if (test_bit(ARS_REQ, &nfit_spa->ars_state)) {
4543     - int rc = ars_start(acpi_desc, nfit_spa);
4544     -
4545     - clear_bit(ARS_DONE, &nfit_spa->ars_state);
4546     - dev = nd_region_dev(nfit_spa->nd_region);
4547     - dev_dbg(dev, "ARS: range %d ARS start (%d)\n",
4548     - nfit_spa->spa->range_index, rc);
4549     - if (rc == 0 || rc == -EBUSY)
4550     - return 1;
4551     - dev_err(dev, "ARS: range %d ARS failed (%d)\n",
4552     - nfit_spa->spa->range_index, rc);
4553     - set_bit(ARS_FAILED, &nfit_spa->ars_state);
4554     +
4555     + /* prefer short ARS requests first */
4556     + if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state))
4557     + req_type = ARS_REQ_SHORT;
4558     + else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state))
4559     + req_type = ARS_REQ_LONG;
4560     + else
4561     + continue;
4562     + rc = ars_start(acpi_desc, nfit_spa, req_type);
4563     +
4564     + dev = nd_region_dev(nfit_spa->nd_region);
4565     + dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n",
4566     + nfit_spa->spa->range_index,
4567     + req_type == ARS_REQ_SHORT ? "short" : "long",
4568     + rc);
4569     + /*
4570     + * Hmm, we raced someone else starting ARS? Try again in
4571     + * a bit.
4572     + */
4573     + if (rc == -EBUSY)
4574     + return 1;
4575     + if (rc == 0) {
4576     + dev_WARN_ONCE(dev, acpi_desc->scrub_spa,
4577     + "scrub start while range %d active\n",
4578     + acpi_desc->scrub_spa->spa->range_index);
4579     + clear_bit(req_type, &nfit_spa->ars_state);
4580     + acpi_desc->scrub_spa = nfit_spa;
4581     + /*
4582     + * Consider this spa last for future scrub
4583     + * requests
4584     + */
4585     + list_move_tail(&nfit_spa->list, &acpi_desc->spas);
4586     + return 1;
4587     }
4588     +
4589     + dev_err(dev, "ARS: range %d ARS failed (%d)\n",
4590     + nfit_spa->spa->range_index, rc);
4591     + set_bit(ARS_FAILED, &nfit_spa->ars_state);
4592     }
4593     return 0;
4594     }
4595     @@ -3009,6 +3043,7 @@ static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
4596     struct nd_cmd_ars_cap ars_cap;
4597     int rc;
4598    
4599     + set_bit(ARS_FAILED, &nfit_spa->ars_state);
4600     memset(&ars_cap, 0, sizeof(ars_cap));
4601     rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
4602     if (rc < 0)
4603     @@ -3025,16 +3060,14 @@ static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
4604     nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
4605     acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars);
4606     clear_bit(ARS_FAILED, &nfit_spa->ars_state);
4607     - set_bit(ARS_REQ, &nfit_spa->ars_state);
4608     }
4609    
4610     static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
4611     {
4612     struct nfit_spa *nfit_spa;
4613     - int rc, query_rc;
4614     + int rc;
4615    
4616     list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
4617     - set_bit(ARS_FAILED, &nfit_spa->ars_state);
4618     switch (nfit_spa_type(nfit_spa->spa)) {
4619     case NFIT_SPA_VOLATILE:
4620     case NFIT_SPA_PM:
4621     @@ -3043,20 +3076,12 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
4622     }
4623     }
4624    
4625     - /*
4626     - * Reap any results that might be pending before starting new
4627     - * short requests.
4628     - */
4629     - query_rc = acpi_nfit_query_poison(acpi_desc);
4630     - if (query_rc == 0)
4631     - ars_complete_all(acpi_desc);
4632     -
4633     list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
4634     switch (nfit_spa_type(nfit_spa->spa)) {
4635     case NFIT_SPA_VOLATILE:
4636     case NFIT_SPA_PM:
4637     /* register regions and kick off initial ARS run */
4638     - rc = ars_register(acpi_desc, nfit_spa, &query_rc);
4639     + rc = ars_register(acpi_desc, nfit_spa);
4640     if (rc)
4641     return rc;
4642     break;
4643     @@ -3251,7 +3276,8 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
4644     return 0;
4645     }
4646    
4647     -int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
4648     +int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
4649     + enum nfit_ars_state req_type)
4650     {
4651     struct device *dev = acpi_desc->dev;
4652     int scheduled = 0, busy = 0;
4653     @@ -3271,14 +3297,10 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
4654     if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
4655     continue;
4656    
4657     - if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state)) {
4658     + if (test_and_set_bit(req_type, &nfit_spa->ars_state))
4659     busy++;
4660     - set_bit(ARS_REQ_REDO, &nfit_spa->ars_state);
4661     - } else {
4662     - if (test_bit(ARS_SHORT, &flags))
4663     - set_bit(ARS_SHORT, &nfit_spa->ars_state);
4664     + else
4665     scheduled++;
4666     - }
4667     }
4668     if (scheduled) {
4669     sched_ars(acpi_desc);
4670     @@ -3464,10 +3486,11 @@ static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
4671     static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
4672     {
4673     struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
4674     - unsigned long flags = (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) ?
4675     - 0 : 1 << ARS_SHORT;
4676    
4677     - acpi_nfit_ars_rescan(acpi_desc, flags);
4678     + if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON)
4679     + acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
4680     + else
4681     + acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT);
4682     }
4683    
4684     void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
4685     diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
4686     index d1274ea2d251..02c10de50386 100644
4687     --- a/drivers/acpi/nfit/nfit.h
4688     +++ b/drivers/acpi/nfit/nfit.h
4689     @@ -118,10 +118,8 @@ enum nfit_dimm_notifiers {
4690     };
4691    
4692     enum nfit_ars_state {
4693     - ARS_REQ,
4694     - ARS_REQ_REDO,
4695     - ARS_DONE,
4696     - ARS_SHORT,
4697     + ARS_REQ_SHORT,
4698     + ARS_REQ_LONG,
4699     ARS_FAILED,
4700     };
4701    
4702     @@ -198,6 +196,7 @@ struct acpi_nfit_desc {
4703     struct device *dev;
4704     u8 ars_start_flags;
4705     struct nd_cmd_ars_status *ars_status;
4706     + struct nfit_spa *scrub_spa;
4707     struct delayed_work dwork;
4708     struct list_head list;
4709     struct kernfs_node *scrub_count_state;
4710     @@ -252,7 +251,8 @@ struct nfit_blk {
4711    
4712     extern struct list_head acpi_descs;
4713     extern struct mutex acpi_desc_lock;
4714     -int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags);
4715     +int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
4716     + enum nfit_ars_state req_type);
4717    
4718     #ifdef CONFIG_X86_MCE
4719     void nfit_mce_register(void);
4720     diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
4721     index 8df9abfa947b..ed73f6fb0779 100644
4722     --- a/drivers/acpi/osl.c
4723     +++ b/drivers/acpi/osl.c
4724     @@ -617,15 +617,18 @@ void acpi_os_stall(u32 us)
4725     }
4726    
4727     /*
4728     - * Support ACPI 3.0 AML Timer operand
4729     - * Returns 64-bit free-running, monotonically increasing timer
4730     - * with 100ns granularity
4731     + * Support ACPI 3.0 AML Timer operand. Returns a 64-bit free-running,
4732     + * monotonically increasing timer with 100ns granularity. Do not use
4733     + * ktime_get() to implement this function because this function may get
4734     + * called after timekeeping has been suspended. Note: calling this function
4735     + * after timekeeping has been suspended may lead to unexpected results
4736     + * because when timekeeping is suspended the jiffies counter is not
4737     + * incremented. See also timekeeping_suspend().
4738     */
4739     u64 acpi_os_get_timer(void)
4740     {
4741     - u64 time_ns = ktime_to_ns(ktime_get());
4742     - do_div(time_ns, 100);
4743     - return time_ns;
4744     + return (get_jiffies_64() - INITIAL_JIFFIES) *
4745     + (ACPI_100NSEC_PER_SEC / HZ);
4746     }
4747    
4748     acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
4749     diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
4750     index d1e26cb599bf..da031b1df6f5 100644
4751     --- a/drivers/acpi/pptt.c
4752     +++ b/drivers/acpi/pptt.c
4753     @@ -338,9 +338,6 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
4754     return found;
4755     }
4756    
4757     -/* total number of attributes checked by the properties code */
4758     -#define PPTT_CHECKED_ATTRIBUTES 4
4759     -
4760     /**
4761     * update_cache_properties() - Update cacheinfo for the given processor
4762     * @this_leaf: Kernel cache info structure being updated
4763     @@ -357,25 +354,15 @@ static void update_cache_properties(struct cacheinfo *this_leaf,
4764     struct acpi_pptt_cache *found_cache,
4765     struct acpi_pptt_processor *cpu_node)
4766     {
4767     - int valid_flags = 0;
4768     -
4769     this_leaf->fw_token = cpu_node;
4770     - if (found_cache->flags & ACPI_PPTT_SIZE_PROPERTY_VALID) {
4771     + if (found_cache->flags & ACPI_PPTT_SIZE_PROPERTY_VALID)
4772     this_leaf->size = found_cache->size;
4773     - valid_flags++;
4774     - }
4775     - if (found_cache->flags & ACPI_PPTT_LINE_SIZE_VALID) {
4776     + if (found_cache->flags & ACPI_PPTT_LINE_SIZE_VALID)
4777     this_leaf->coherency_line_size = found_cache->line_size;
4778     - valid_flags++;
4779     - }
4780     - if (found_cache->flags & ACPI_PPTT_NUMBER_OF_SETS_VALID) {
4781     + if (found_cache->flags & ACPI_PPTT_NUMBER_OF_SETS_VALID)
4782     this_leaf->number_of_sets = found_cache->number_of_sets;
4783     - valid_flags++;
4784     - }
4785     - if (found_cache->flags & ACPI_PPTT_ASSOCIATIVITY_VALID) {
4786     + if (found_cache->flags & ACPI_PPTT_ASSOCIATIVITY_VALID)
4787     this_leaf->ways_of_associativity = found_cache->associativity;
4788     - valid_flags++;
4789     - }
4790     if (found_cache->flags & ACPI_PPTT_WRITE_POLICY_VALID) {
4791     switch (found_cache->attributes & ACPI_PPTT_MASK_WRITE_POLICY) {
4792     case ACPI_PPTT_CACHE_POLICY_WT:
4793     @@ -402,11 +389,17 @@ static void update_cache_properties(struct cacheinfo *this_leaf,
4794     }
4795     }
4796     /*
4797     - * If the above flags are valid, and the cache type is NOCACHE
4798     - * update the cache type as well.
4799     + * If cache type is NOCACHE, then the cache hasn't been specified
4800     + * via other mechanisms. Update the type if a cache type has been
4801     + * provided.
4802     + *
4803     + * Note, we assume such caches are unified based on conventional system
4804     + * design and known examples. Significant work is required elsewhere to
4805     + * fully support data/instruction only type caches which are only
4806     + * specified in PPTT.
4807     */
4808     if (this_leaf->type == CACHE_TYPE_NOCACHE &&
4809     - valid_flags == PPTT_CHECKED_ATTRIBUTES)
4810     + found_cache->flags & ACPI_PPTT_CACHE_TYPE_VALID)
4811     this_leaf->type = CACHE_TYPE_UNIFIED;
4812     }
4813    
4814     diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
4815     index a9dd4ea7467d..6e594644cb1d 100644
4816     --- a/drivers/ata/libata-core.c
4817     +++ b/drivers/ata/libata-core.c
4818     @@ -4553,6 +4553,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4819     /* These specific Samsung models/firmware-revs do not handle LPM well */
4820     { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
4821     { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
4822     + { "SAMSUNG MZ7TD256HAFV-000L9", "DXT02L5Q", ATA_HORKAGE_NOLPM, },
4823    
4824     /* devices that don't properly handle queued TRIM commands */
4825     { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4826     diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
4827     index dfb2c2622e5a..822e3060d834 100644
4828     --- a/drivers/block/ataflop.c
4829     +++ b/drivers/block/ataflop.c
4830     @@ -1935,6 +1935,11 @@ static int __init atari_floppy_init (void)
4831     unit[i].disk = alloc_disk(1);
4832     if (!unit[i].disk)
4833     goto Enomem;
4834     +
4835     + unit[i].disk->queue = blk_init_queue(do_fd_request,
4836     + &ataflop_lock);
4837     + if (!unit[i].disk->queue)
4838     + goto Enomem;
4839     }
4840    
4841     if (UseTrackbuffer < 0)
4842     @@ -1966,10 +1971,6 @@ static int __init atari_floppy_init (void)
4843     sprintf(unit[i].disk->disk_name, "fd%d", i);
4844     unit[i].disk->fops = &floppy_fops;
4845     unit[i].disk->private_data = &unit[i];
4846     - unit[i].disk->queue = blk_init_queue(do_fd_request,
4847     - &ataflop_lock);
4848     - if (!unit[i].disk->queue)
4849     - goto Enomem;
4850     set_capacity(unit[i].disk, MAX_DISK_SIZE * 2);
4851     add_disk(unit[i].disk);
4852     }
4853     @@ -1984,13 +1985,17 @@ static int __init atari_floppy_init (void)
4854    
4855     return 0;
4856     Enomem:
4857     - while (i--) {
4858     - struct request_queue *q = unit[i].disk->queue;
4859     + do {
4860     + struct gendisk *disk = unit[i].disk;
4861    
4862     - put_disk(unit[i].disk);
4863     - if (q)
4864     - blk_cleanup_queue(q);
4865     - }
4866     + if (disk) {
4867     + if (disk->queue) {
4868     + blk_cleanup_queue(disk->queue);
4869     + disk->queue = NULL;
4870     + }
4871     + put_disk(unit[i].disk);
4872     + }
4873     + } while (i--);
4874    
4875     unregister_blkdev(FLOPPY_MAJOR, "fd");
4876     return -ENOMEM;
4877     diff --git a/drivers/block/swim.c b/drivers/block/swim.c
4878     index 0e31884a9519..cbe909c51847 100644
4879     --- a/drivers/block/swim.c
4880     +++ b/drivers/block/swim.c
4881     @@ -887,8 +887,17 @@ static int swim_floppy_init(struct swim_priv *swd)
4882    
4883     exit_put_disks:
4884     unregister_blkdev(FLOPPY_MAJOR, "fd");
4885     - while (drive--)
4886     - put_disk(swd->unit[drive].disk);
4887     + do {
4888     + struct gendisk *disk = swd->unit[drive].disk;
4889     +
4890     + if (disk) {
4891     + if (disk->queue) {
4892     + blk_cleanup_queue(disk->queue);
4893     + disk->queue = NULL;
4894     + }
4895     + put_disk(disk);
4896     + }
4897     + } while (drive--);
4898     return err;
4899     }
4900    
4901     diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
4902     index 429d20131c7e..3e905da33bcb 100644
4903     --- a/drivers/block/xen-blkfront.c
4904     +++ b/drivers/block/xen-blkfront.c
4905     @@ -1919,6 +1919,7 @@ static int negotiate_mq(struct blkfront_info *info)
4906     GFP_KERNEL);
4907     if (!info->rinfo) {
4908     xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
4909     + info->nr_rings = 0;
4910     return -ENOMEM;
4911     }
4912    
4913     @@ -2493,6 +2494,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
4914    
4915     dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
4916    
4917     + if (!info)
4918     + return 0;
4919     +
4920     blkif_free(info, 0);
4921    
4922     mutex_lock(&info->mutex);
4923     diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
4924     index 99cde1f9467d..e3e4d929e74f 100644
4925     --- a/drivers/bluetooth/btbcm.c
4926     +++ b/drivers/bluetooth/btbcm.c
4927     @@ -324,6 +324,7 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
4928     { 0x4103, "BCM4330B1" }, /* 002.001.003 */
4929     { 0x410e, "BCM43341B0" }, /* 002.001.014 */
4930     { 0x4406, "BCM4324B3" }, /* 002.004.006 */
4931     + { 0x6109, "BCM4335C0" }, /* 003.001.009 */
4932     { 0x610c, "BCM4354" }, /* 003.001.012 */
4933     { 0x2122, "BCM4343A0" }, /* 001.001.034 */
4934     { 0x2209, "BCM43430A1" }, /* 001.002.009 */
4935     diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
4936     index 2fee65886d50..f0d593c3fa72 100644
4937     --- a/drivers/bluetooth/hci_qca.c
4938     +++ b/drivers/bluetooth/hci_qca.c
4939     @@ -167,7 +167,7 @@ struct qca_serdev {
4940     };
4941    
4942     static int qca_power_setup(struct hci_uart *hu, bool on);
4943     -static void qca_power_shutdown(struct hci_dev *hdev);
4944     +static void qca_power_shutdown(struct hci_uart *hu);
4945    
4946     static void __serial_clock_on(struct tty_struct *tty)
4947     {
4948     @@ -609,7 +609,7 @@ static int qca_close(struct hci_uart *hu)
4949     if (hu->serdev) {
4950     qcadev = serdev_device_get_drvdata(hu->serdev);
4951     if (qcadev->btsoc_type == QCA_WCN3990)
4952     - qca_power_shutdown(hu->hdev);
4953     + qca_power_shutdown(hu);
4954     else
4955     gpiod_set_value_cansleep(qcadev->bt_en, 0);
4956    
4957     @@ -1232,12 +1232,15 @@ static const struct qca_vreg_data qca_soc_data = {
4958     .num_vregs = 4,
4959     };
4960    
4961     -static void qca_power_shutdown(struct hci_dev *hdev)
4962     +static void qca_power_shutdown(struct hci_uart *hu)
4963     {
4964     - struct hci_uart *hu = hci_get_drvdata(hdev);
4965     + struct serdev_device *serdev = hu->serdev;
4966     + unsigned char cmd = QCA_WCN3990_POWEROFF_PULSE;
4967    
4968     host_set_baudrate(hu, 2400);
4969     - qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE);
4970     + hci_uart_set_flow_control(hu, true);
4971     + serdev_device_write_buf(serdev, &cmd, sizeof(cmd));
4972     + hci_uart_set_flow_control(hu, false);
4973     qca_power_setup(hu, false);
4974     }
4975    
4976     @@ -1413,7 +1416,7 @@ static void qca_serdev_remove(struct serdev_device *serdev)
4977     struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
4978    
4979     if (qcadev->btsoc_type == QCA_WCN3990)
4980     - qca_power_shutdown(qcadev->serdev_hu.hdev);
4981     + qca_power_shutdown(&qcadev->serdev_hu);
4982     else
4983     clk_disable_unprepare(qcadev->susclk);
4984    
4985     diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
4986     index 29e67a80fb20..9b786726e426 100644
4987     --- a/drivers/char/ipmi/ipmi_ssif.c
4988     +++ b/drivers/char/ipmi/ipmi_ssif.c
4989     @@ -606,8 +606,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
4990     flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
4991     ssif_info->waiting_alert = true;
4992     ssif_info->rtc_us_timer = SSIF_MSG_USEC;
4993     - mod_timer(&ssif_info->retry_timer,
4994     - jiffies + SSIF_MSG_JIFFIES);
4995     + if (!ssif_info->stopping)
4996     + mod_timer(&ssif_info->retry_timer,
4997     + jiffies + SSIF_MSG_JIFFIES);
4998     ipmi_ssif_unlock_cond(ssif_info, flags);
4999     return;
5000     }
5001     @@ -939,8 +940,9 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
5002     ssif_info->waiting_alert = true;
5003     ssif_info->retries_left = SSIF_RECV_RETRIES;
5004     ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
5005     - mod_timer(&ssif_info->retry_timer,
5006     - jiffies + SSIF_MSG_PART_JIFFIES);
5007     + if (!ssif_info->stopping)
5008     + mod_timer(&ssif_info->retry_timer,
5009     + jiffies + SSIF_MSG_PART_JIFFIES);
5010     ipmi_ssif_unlock_cond(ssif_info, flags);
5011     }
5012     }
5013     diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
5014     index 1a803b0cf980..7d958ff426e0 100644
5015     --- a/drivers/char/tpm/tpm-interface.c
5016     +++ b/drivers/char/tpm/tpm-interface.c
5017     @@ -663,7 +663,8 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_space *space,
5018     return len;
5019    
5020     err = be32_to_cpu(header->return_code);
5021     - if (err != 0 && desc)
5022     + if (err != 0 && err != TPM_ERR_DISABLED && err != TPM_ERR_DEACTIVATED
5023     + && desc)
5024     dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err,
5025     desc);
5026     if (err)
5027     @@ -1321,7 +1322,8 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
5028     }
5029    
5030     rlength = be32_to_cpu(tpm_cmd.header.out.length);
5031     - if (rlength < offsetof(struct tpm_getrandom_out, rng_data) +
5032     + if (rlength < TPM_HEADER_SIZE +
5033     + offsetof(struct tpm_getrandom_out, rng_data) +
5034     recd) {
5035     total = -EFAULT;
5036     break;
5037     diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
5038     index c31b490bd41d..3acf4fd4e5a5 100644
5039     --- a/drivers/char/tpm/tpm2-cmd.c
5040     +++ b/drivers/char/tpm/tpm2-cmd.c
5041     @@ -329,7 +329,9 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
5042     &buf.data[TPM_HEADER_SIZE];
5043     recd = min_t(u32, be16_to_cpu(out->size), num_bytes);
5044     if (tpm_buf_length(&buf) <
5045     - offsetof(struct tpm2_get_random_out, buffer) + recd) {
5046     + TPM_HEADER_SIZE +
5047     + offsetof(struct tpm2_get_random_out, buffer) +
5048     + recd) {
5049     err = -EFAULT;
5050     goto out;
5051     }
5052     diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
5053     index 911475d36800..b150f87f38f5 100644
5054     --- a/drivers/char/tpm/xen-tpmfront.c
5055     +++ b/drivers/char/tpm/xen-tpmfront.c
5056     @@ -264,7 +264,7 @@ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
5057     return -ENOMEM;
5058     }
5059    
5060     - rv = xenbus_grant_ring(dev, &priv->shr, 1, &gref);
5061     + rv = xenbus_grant_ring(dev, priv->shr, 1, &gref);
5062     if (rv < 0)
5063     return rv;
5064    
5065     diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
5066     index 0a9ebf00be46..e58bfcb1169e 100644
5067     --- a/drivers/cpufreq/cpufreq-dt.c
5068     +++ b/drivers/cpufreq/cpufreq-dt.c
5069     @@ -32,6 +32,7 @@ struct private_data {
5070     struct device *cpu_dev;
5071     struct thermal_cooling_device *cdev;
5072     const char *reg_name;
5073     + bool have_static_opps;
5074     };
5075    
5076     static struct freq_attr *cpufreq_dt_attr[] = {
5077     @@ -204,6 +205,15 @@ static int cpufreq_init(struct cpufreq_policy *policy)
5078     }
5079     }
5080    
5081     + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
5082     + if (!priv) {
5083     + ret = -ENOMEM;
5084     + goto out_put_regulator;
5085     + }
5086     +
5087     + priv->reg_name = name;
5088     + priv->opp_table = opp_table;
5089     +
5090     /*
5091     * Initialize OPP tables for all policy->cpus. They will be shared by
5092     * all CPUs which have marked their CPUs shared with OPP bindings.
5093     @@ -214,7 +224,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
5094     *
5095     * OPPs might be populated at runtime, don't check for error here
5096     */
5097     - dev_pm_opp_of_cpumask_add_table(policy->cpus);
5098     + if (!dev_pm_opp_of_cpumask_add_table(policy->cpus))
5099     + priv->have_static_opps = true;
5100    
5101     /*
5102     * But we need OPP table to function so if it is not there let's
5103     @@ -240,19 +251,10 @@ static int cpufreq_init(struct cpufreq_policy *policy)
5104     __func__, ret);
5105     }
5106    
5107     - priv = kzalloc(sizeof(*priv), GFP_KERNEL);
5108     - if (!priv) {
5109     - ret = -ENOMEM;
5110     - goto out_free_opp;
5111     - }
5112     -
5113     - priv->reg_name = name;
5114     - priv->opp_table = opp_table;
5115     -
5116     ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
5117     if (ret) {
5118     dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
5119     - goto out_free_priv;
5120     + goto out_free_opp;
5121     }
5122    
5123     priv->cpu_dev = cpu_dev;
5124     @@ -282,10 +284,11 @@ static int cpufreq_init(struct cpufreq_policy *policy)
5125    
5126     out_free_cpufreq_table:
5127     dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
5128     -out_free_priv:
5129     - kfree(priv);
5130     out_free_opp:
5131     - dev_pm_opp_of_cpumask_remove_table(policy->cpus);
5132     + if (priv->have_static_opps)
5133     + dev_pm_opp_of_cpumask_remove_table(policy->cpus);
5134     + kfree(priv);
5135     +out_put_regulator:
5136     if (name)
5137     dev_pm_opp_put_regulators(opp_table);
5138     out_put_clk:
5139     @@ -300,7 +303,8 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
5140    
5141     cpufreq_cooling_unregister(priv->cdev);
5142     dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
5143     - dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
5144     + if (priv->have_static_opps)
5145     + dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
5146     if (priv->reg_name)
5147     dev_pm_opp_put_regulators(priv->opp_table);
5148    
5149     diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
5150     index f20f20a77d4d..4268f87e99fc 100644
5151     --- a/drivers/cpufreq/cpufreq_conservative.c
5152     +++ b/drivers/cpufreq/cpufreq_conservative.c
5153     @@ -80,8 +80,10 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
5154     * changed in the meantime, so fall back to current frequency in that
5155     * case.
5156     */
5157     - if (requested_freq > policy->max || requested_freq < policy->min)
5158     + if (requested_freq > policy->max || requested_freq < policy->min) {
5159     requested_freq = policy->cur;
5160     + dbs_info->requested_freq = requested_freq;
5161     + }
5162    
5163     freq_step = get_freq_step(cs_tuners, policy);
5164    
5165     @@ -92,7 +94,7 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
5166     if (policy_dbs->idle_periods < UINT_MAX) {
5167     unsigned int freq_steps = policy_dbs->idle_periods * freq_step;
5168    
5169     - if (requested_freq > freq_steps)
5170     + if (requested_freq > policy->min + freq_steps)
5171     requested_freq -= freq_steps;
5172     else
5173     requested_freq = policy->min;
5174     diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
5175     index 4fb91ba39c36..ce3f9ad7120f 100644
5176     --- a/drivers/crypto/caam/regs.h
5177     +++ b/drivers/crypto/caam/regs.h
5178     @@ -70,22 +70,22 @@
5179     extern bool caam_little_end;
5180     extern bool caam_imx;
5181    
5182     -#define caam_to_cpu(len) \
5183     -static inline u##len caam##len ## _to_cpu(u##len val) \
5184     -{ \
5185     - if (caam_little_end) \
5186     - return le##len ## _to_cpu(val); \
5187     - else \
5188     - return be##len ## _to_cpu(val); \
5189     +#define caam_to_cpu(len) \
5190     +static inline u##len caam##len ## _to_cpu(u##len val) \
5191     +{ \
5192     + if (caam_little_end) \
5193     + return le##len ## _to_cpu((__force __le##len)val); \
5194     + else \
5195     + return be##len ## _to_cpu((__force __be##len)val); \
5196     }
5197    
5198     -#define cpu_to_caam(len) \
5199     -static inline u##len cpu_to_caam##len(u##len val) \
5200     -{ \
5201     - if (caam_little_end) \
5202     - return cpu_to_le##len(val); \
5203     - else \
5204     - return cpu_to_be##len(val); \
5205     +#define cpu_to_caam(len) \
5206     +static inline u##len cpu_to_caam##len(u##len val) \
5207     +{ \
5208     + if (caam_little_end) \
5209     + return (__force u##len)cpu_to_le##len(val); \
5210     + else \
5211     + return (__force u##len)cpu_to_be##len(val); \
5212     }
5213    
5214     caam_to_cpu(16)
5215     diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
5216     index 85820a2d69d4..987899610b46 100644
5217     --- a/drivers/dma/dma-jz4780.c
5218     +++ b/drivers/dma/dma-jz4780.c
5219     @@ -761,6 +761,11 @@ static int jz4780_dma_probe(struct platform_device *pdev)
5220     struct resource *res;
5221     int i, ret;
5222    
5223     + if (!dev->of_node) {
5224     + dev_err(dev, "This driver must be probed from devicetree\n");
5225     + return -EINVAL;
5226     + }
5227     +
5228     jzdma = devm_kzalloc(dev, sizeof(*jzdma), GFP_KERNEL);
5229     if (!jzdma)
5230     return -ENOMEM;
5231     diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
5232     index 4fa4c06c9edb..21a5708985bc 100644
5233     --- a/drivers/dma/ioat/init.c
5234     +++ b/drivers/dma/ioat/init.c
5235     @@ -1205,8 +1205,15 @@ static void ioat_shutdown(struct pci_dev *pdev)
5236    
5237     spin_lock_bh(&ioat_chan->prep_lock);
5238     set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
5239     - del_timer_sync(&ioat_chan->timer);
5240     spin_unlock_bh(&ioat_chan->prep_lock);
5241     + /*
5242     + * Synchronization rule for del_timer_sync():
5243     + * - The caller must not hold locks which would prevent
5244     + * completion of the timer's handler.
5245     + * So prep_lock cannot be held before calling it.
5246     + */
5247     + del_timer_sync(&ioat_chan->timer);
5248     +
5249     /* this should quiesce then reset */
5250     ioat_reset_hw(ioat_chan);
5251     }
5252     diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
5253     index 4cf0d4d0cecf..25610286979f 100644
5254     --- a/drivers/dma/ppc4xx/adma.c
5255     +++ b/drivers/dma/ppc4xx/adma.c
5256     @@ -4360,7 +4360,7 @@ static ssize_t enable_store(struct device_driver *dev, const char *buf,
5257     }
5258     static DRIVER_ATTR_RW(enable);
5259    
5260     -static ssize_t poly_store(struct device_driver *dev, char *buf)
5261     +static ssize_t poly_show(struct device_driver *dev, char *buf)
5262     {
5263     ssize_t size = 0;
5264     u32 reg;
5265     diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
5266     index 18aeabb1d5ee..e2addb2bca29 100644
5267     --- a/drivers/edac/amd64_edac.c
5268     +++ b/drivers/edac/amd64_edac.c
5269     @@ -2200,6 +2200,15 @@ static struct amd64_family_type family_types[] = {
5270     .dbam_to_cs = f17_base_addr_to_cs_size,
5271     }
5272     },
5273     + [F17_M10H_CPUS] = {
5274     + .ctl_name = "F17h_M10h",
5275     + .f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
5276     + .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
5277     + .ops = {
5278     + .early_channel_count = f17_early_channel_count,
5279     + .dbam_to_cs = f17_base_addr_to_cs_size,
5280     + }
5281     + },
5282     };
5283    
5284     /*
5285     @@ -3188,6 +3197,11 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
5286     break;
5287    
5288     case 0x17:
5289     + if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
5290     + fam_type = &family_types[F17_M10H_CPUS];
5291     + pvt->ops = &family_types[F17_M10H_CPUS].ops;
5292     + break;
5293     + }
5294     fam_type = &family_types[F17_CPUS];
5295     pvt->ops = &family_types[F17_CPUS].ops;
5296     break;
5297     diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
5298     index 1d4b74e9a037..4242f8e39c18 100644
5299     --- a/drivers/edac/amd64_edac.h
5300     +++ b/drivers/edac/amd64_edac.h
5301     @@ -115,6 +115,8 @@
5302     #define PCI_DEVICE_ID_AMD_16H_M30H_NB_F2 0x1582
5303     #define PCI_DEVICE_ID_AMD_17H_DF_F0 0x1460
5304     #define PCI_DEVICE_ID_AMD_17H_DF_F6 0x1466
5305     +#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F0 0x15e8
5306     +#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
5307    
5308     /*
5309     * Function 1 - Address Map
5310     @@ -281,6 +283,7 @@ enum amd_families {
5311     F16_CPUS,
5312     F16_M30H_CPUS,
5313     F17_CPUS,
5314     + F17_M10H_CPUS,
5315     NUM_FAMILIES,
5316     };
5317    
5318     diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
5319     index 8e120bf60624..f1d19504a028 100644
5320     --- a/drivers/edac/i7core_edac.c
5321     +++ b/drivers/edac/i7core_edac.c
5322     @@ -1711,6 +1711,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
5323     u32 errnum = find_first_bit(&error, 32);
5324    
5325     if (uncorrected_error) {
5326     + core_err_cnt = 1;
5327     if (ripv)
5328     tp_event = HW_EVENT_ERR_FATAL;
5329     else
5330     diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
5331     index 07726fb00321..72cea3cb8622 100644
5332     --- a/drivers/edac/sb_edac.c
5333     +++ b/drivers/edac/sb_edac.c
5334     @@ -2888,6 +2888,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
5335     recoverable = GET_BITFIELD(m->status, 56, 56);
5336    
5337     if (uncorrected_error) {
5338     + core_err_cnt = 1;
5339     if (ripv) {
5340     type = "FATAL";
5341     tp_event = HW_EVENT_ERR_FATAL;
5342     diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
5343     index fae095162c01..4ba92f1dd0f7 100644
5344     --- a/drivers/edac/skx_edac.c
5345     +++ b/drivers/edac/skx_edac.c
5346     @@ -668,7 +668,7 @@ sad_found:
5347     break;
5348     case 2:
5349     lchan = (addr >> shift) % 2;
5350     - lchan = (lchan << 1) | ~lchan;
5351     + lchan = (lchan << 1) | !lchan;
5352     break;
5353     case 3:
5354     lchan = ((addr >> shift) % 2) << 1;
5355     @@ -959,6 +959,7 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
5356     recoverable = GET_BITFIELD(m->status, 56, 56);
5357    
5358     if (uncorrected_error) {
5359     + core_err_cnt = 1;
5360     if (ripv) {
5361     type = "FATAL";
5362     tp_event = HW_EVENT_ERR_FATAL;
5363     diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
5364     index 19db5709ae28..898bb9abc41f 100644
5365     --- a/drivers/firmware/google/coreboot_table.c
5366     +++ b/drivers/firmware/google/coreboot_table.c
5367     @@ -110,7 +110,8 @@ int coreboot_table_init(struct device *dev, void __iomem *ptr)
5368    
5369     if (strncmp(header.signature, "LBIO", sizeof(header.signature))) {
5370     pr_warn("coreboot_table: coreboot table missing or corrupt!\n");
5371     - return -ENODEV;
5372     + ret = -ENODEV;
5373     + goto out;
5374     }
5375    
5376     ptr_entry = (void *)ptr_header + header.header_bytes;
5377     @@ -137,7 +138,8 @@ int coreboot_table_init(struct device *dev, void __iomem *ptr)
5378    
5379     ptr_entry += entry.size;
5380     }
5381     -
5382     +out:
5383     + iounmap(ptr);
5384     return ret;
5385     }
5386     EXPORT_SYMBOL(coreboot_table_init);
5387     @@ -146,7 +148,6 @@ int coreboot_table_exit(void)
5388     {
5389     if (ptr_header) {
5390     bus_unregister(&coreboot_bus_type);
5391     - iounmap(ptr_header);
5392     ptr_header = NULL;
5393     }
5394    
5395     diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
5396     index 16c7f9f49416..af936dcca659 100644
5397     --- a/drivers/gpio/gpio-brcmstb.c
5398     +++ b/drivers/gpio/gpio-brcmstb.c
5399     @@ -664,6 +664,18 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
5400     struct brcmstb_gpio_bank *bank;
5401     struct gpio_chip *gc;
5402    
5403     + /*
5404     + * If bank_width is 0, then there is an empty bank in the
5405     + * register block. Special handling for this case.
5406     + */
5407     + if (bank_width == 0) {
5408     + dev_dbg(dev, "Width 0 found: Empty bank @ %d\n",
5409     + num_banks);
5410     + num_banks++;
5411     + gpio_base += MAX_GPIO_PER_BANK;
5412     + continue;
5413     + }
5414     +
5415     bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL);
5416     if (!bank) {
5417     err = -ENOMEM;
5418     @@ -740,9 +752,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
5419     goto fail;
5420     }
5421    
5422     - dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n",
5423     - num_banks, priv->gpio_base, gpio_base - 1);
5424     -
5425     if (priv->parent_wake_irq && need_wakeup_event)
5426     pm_wakeup_event(dev, 0);
5427    
5428     diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
5429     index df30490da820..ea874fd033a5 100644
5430     --- a/drivers/gpio/gpio-mxs.c
5431     +++ b/drivers/gpio/gpio-mxs.c
5432     @@ -18,8 +18,6 @@
5433     #include <linux/platform_device.h>
5434     #include <linux/slab.h>
5435     #include <linux/gpio/driver.h>
5436     -/* FIXME: for gpio_get_value(), replace this by direct register read */
5437     -#include <linux/gpio.h>
5438     #include <linux/module.h>
5439    
5440     #define MXS_SET 0x4
5441     @@ -86,7 +84,7 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
5442     port->both_edges &= ~pin_mask;
5443     switch (type) {
5444     case IRQ_TYPE_EDGE_BOTH:
5445     - val = gpio_get_value(port->gc.base + d->hwirq);
5446     + val = port->gc.get(&port->gc, d->hwirq);
5447     if (val)
5448     edge = GPIO_INT_FALL_EDGE;
5449     else
5450     diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
5451     index 2d45d1dd9554..643f5edd68fe 100644
5452     --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
5453     +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
5454     @@ -1446,8 +1446,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
5455     }
5456    
5457     /* The CEC module handles HDMI hotplug detection */
5458     - cec_np = of_find_compatible_node(np->parent, NULL,
5459     - "mediatek,mt8173-cec");
5460     + cec_np = of_get_compatible_child(np->parent, "mediatek,mt8173-cec");
5461     if (!cec_np) {
5462     dev_err(dev, "Failed to find CEC node\n");
5463     return -EINVAL;
5464     @@ -1457,8 +1456,10 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
5465     if (!cec_pdev) {
5466     dev_err(hdmi->dev, "Waiting for CEC device %pOF\n",
5467     cec_np);
5468     + of_node_put(cec_np);
5469     return -EPROBE_DEFER;
5470     }
5471     + of_node_put(cec_np);
5472     hdmi->cec_dev = &cec_pdev->dev;
5473    
5474     /*
5475     diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
5476     index cf2a18571d48..a132c37d7334 100644
5477     --- a/drivers/gpu/vga/vga_switcheroo.c
5478     +++ b/drivers/gpu/vga/vga_switcheroo.c
5479     @@ -380,6 +380,9 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
5480     mutex_unlock(&vgasr_mutex);
5481     return -EINVAL;
5482     }
5483     + /* notify if GPU has been already bound */
5484     + if (ops->gpu_bound)
5485     + ops->gpu_bound(pdev, id);
5486     }
5487     mutex_unlock(&vgasr_mutex);
5488    
5489     diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
5490     index 23872d08308c..a746017fac17 100644
5491     --- a/drivers/hid/usbhid/hiddev.c
5492     +++ b/drivers/hid/usbhid/hiddev.c
5493     @@ -512,14 +512,24 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
5494     if (cmd == HIDIOCGCOLLECTIONINDEX) {
5495     if (uref->usage_index >= field->maxusage)
5496     goto inval;
5497     + uref->usage_index =
5498     + array_index_nospec(uref->usage_index,
5499     + field->maxusage);
5500     } else if (uref->usage_index >= field->report_count)
5501     goto inval;
5502     }
5503    
5504     - if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
5505     - (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
5506     - uref->usage_index + uref_multi->num_values > field->report_count))
5507     - goto inval;
5508     + if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
5509     + if (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
5510     + uref->usage_index + uref_multi->num_values >
5511     + field->report_count)
5512     + goto inval;
5513     +
5514     + uref->usage_index =
5515     + array_index_nospec(uref->usage_index,
5516     + field->report_count -
5517     + uref_multi->num_values);
5518     + }
5519    
5520     switch (cmd) {
5521     case HIDIOCGUSAGE:
5522     diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
5523     index e0a06be5ef5c..5dd3a8245f0f 100644
5524     --- a/drivers/hid/wacom_wac.c
5525     +++ b/drivers/hid/wacom_wac.c
5526     @@ -3335,6 +3335,7 @@ static void wacom_setup_intuos(struct wacom_wac *wacom_wac)
5527    
5528     void wacom_setup_device_quirks(struct wacom *wacom)
5529     {
5530     + struct wacom_wac *wacom_wac = &wacom->wacom_wac;
5531     struct wacom_features *features = &wacom->wacom_wac.features;
5532    
5533     /* The pen and pad share the same interface on most devices */
5534     @@ -3464,6 +3465,24 @@ void wacom_setup_device_quirks(struct wacom *wacom)
5535    
5536     if (features->type == REMOTE)
5537     features->device_type |= WACOM_DEVICETYPE_WL_MONITOR;
5538     +
5539     + /* HID descriptor for DTK-2451 / DTH-2452 claims to report lots
5540     + * of things it shouldn't. Lets fix up the damage...
5541     + */
5542     + if (wacom->hdev->product == 0x382 || wacom->hdev->product == 0x37d) {
5543     + features->quirks &= ~WACOM_QUIRK_TOOLSERIAL;
5544     + __clear_bit(BTN_TOOL_BRUSH, wacom_wac->pen_input->keybit);
5545     + __clear_bit(BTN_TOOL_PENCIL, wacom_wac->pen_input->keybit);
5546     + __clear_bit(BTN_TOOL_AIRBRUSH, wacom_wac->pen_input->keybit);
5547     + __clear_bit(ABS_Z, wacom_wac->pen_input->absbit);
5548     + __clear_bit(ABS_DISTANCE, wacom_wac->pen_input->absbit);
5549     + __clear_bit(ABS_TILT_X, wacom_wac->pen_input->absbit);
5550     + __clear_bit(ABS_TILT_Y, wacom_wac->pen_input->absbit);
5551     + __clear_bit(ABS_WHEEL, wacom_wac->pen_input->absbit);
5552     + __clear_bit(ABS_MISC, wacom_wac->pen_input->absbit);
5553     + __clear_bit(MSC_SERIAL, wacom_wac->pen_input->mscbit);
5554     + __clear_bit(EV_MSC, wacom_wac->pen_input->evbit);
5555     + }
5556     }
5557    
5558     int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
5559     diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
5560     index 0f0e091c117c..c4a1ebcfffb6 100644
5561     --- a/drivers/hv/channel_mgmt.c
5562     +++ b/drivers/hv/channel_mgmt.c
5563     @@ -606,16 +606,18 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
5564     bool perf_chn = vmbus_devs[dev_type].perf_device;
5565     struct vmbus_channel *primary = channel->primary_channel;
5566     int next_node;
5567     - struct cpumask available_mask;
5568     + cpumask_var_t available_mask;
5569     struct cpumask *alloced_mask;
5570    
5571     if ((vmbus_proto_version == VERSION_WS2008) ||
5572     - (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
5573     + (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) ||
5574     + !alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
5575     /*
5576     * Prior to win8, all channel interrupts are
5577     * delivered on cpu 0.
5578     * Also if the channel is not a performance critical
5579     * channel, bind it to cpu 0.
5580     + * In case alloc_cpumask_var() fails, bind it to cpu 0.
5581     */
5582     channel->numa_node = 0;
5583     channel->target_cpu = 0;
5584     @@ -653,7 +655,7 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
5585     cpumask_clear(alloced_mask);
5586     }
5587    
5588     - cpumask_xor(&available_mask, alloced_mask,
5589     + cpumask_xor(available_mask, alloced_mask,
5590     cpumask_of_node(primary->numa_node));
5591    
5592     cur_cpu = -1;
5593     @@ -671,10 +673,10 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
5594     }
5595    
5596     while (true) {
5597     - cur_cpu = cpumask_next(cur_cpu, &available_mask);
5598     + cur_cpu = cpumask_next(cur_cpu, available_mask);
5599     if (cur_cpu >= nr_cpu_ids) {
5600     cur_cpu = -1;
5601     - cpumask_copy(&available_mask,
5602     + cpumask_copy(available_mask,
5603     cpumask_of_node(primary->numa_node));
5604     continue;
5605     }
5606     @@ -704,6 +706,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
5607    
5608     channel->target_cpu = cur_cpu;
5609     channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);
5610     +
5611     + free_cpumask_var(available_mask);
5612     }
5613    
5614     static void vmbus_wait_for_unload(void)
5615     diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
5616     index 7718e58dbda5..7688dab32f6e 100644
5617     --- a/drivers/hwmon/pmbus/pmbus.c
5618     +++ b/drivers/hwmon/pmbus/pmbus.c
5619     @@ -118,6 +118,8 @@ static int pmbus_identify(struct i2c_client *client,
5620     } else {
5621     info->pages = 1;
5622     }
5623     +
5624     + pmbus_clear_faults(client);
5625     }
5626    
5627     if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
5628     diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
5629     index 82c3754e21e3..2e2b5851139c 100644
5630     --- a/drivers/hwmon/pmbus/pmbus_core.c
5631     +++ b/drivers/hwmon/pmbus/pmbus_core.c
5632     @@ -2015,7 +2015,10 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
5633     if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
5634     client->flags |= I2C_CLIENT_PEC;
5635    
5636     - pmbus_clear_faults(client);
5637     + if (data->info->pages)
5638     + pmbus_clear_faults(client);
5639     + else
5640     + pmbus_clear_fault_page(client, -1);
5641    
5642     if (info->identify) {
5643     ret = (*info->identify)(client, info);
5644     diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
5645     index 7838af58f92d..9d611dd268e1 100644
5646     --- a/drivers/hwmon/pwm-fan.c
5647     +++ b/drivers/hwmon/pwm-fan.c
5648     @@ -290,9 +290,19 @@ static int pwm_fan_remove(struct platform_device *pdev)
5649     static int pwm_fan_suspend(struct device *dev)
5650     {
5651     struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
5652     + struct pwm_args args;
5653     + int ret;
5654     +
5655     + pwm_get_args(ctx->pwm, &args);
5656     +
5657     + if (ctx->pwm_value) {
5658     + ret = pwm_config(ctx->pwm, 0, args.period);
5659     + if (ret < 0)
5660     + return ret;
5661    
5662     - if (ctx->pwm_value)
5663     pwm_disable(ctx->pwm);
5664     + }
5665     +
5666     return 0;
5667     }
5668    
5669     diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
5670     index 306119eaf16a..0dad8626bcfb 100644
5671     --- a/drivers/hwtracing/coresight/coresight-etb10.c
5672     +++ b/drivers/hwtracing/coresight/coresight-etb10.c
5673     @@ -147,6 +147,10 @@ static int etb_enable(struct coresight_device *csdev, u32 mode)
5674     if (val == CS_MODE_PERF)
5675     return -EBUSY;
5676    
5677     + /* Don't let perf disturb sysFS sessions */
5678     + if (val == CS_MODE_SYSFS && mode == CS_MODE_PERF)
5679     + return -EBUSY;
5680     +
5681     /* Nothing to do, the tracer is already enabled. */
5682     if (val == CS_MODE_SYSFS)
5683     goto out;
5684     diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
5685     index 44b516863c9d..75d2f73582a3 100644
5686     --- a/drivers/iio/adc/at91_adc.c
5687     +++ b/drivers/iio/adc/at91_adc.c
5688     @@ -248,12 +248,14 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
5689     struct iio_poll_func *pf = p;
5690     struct iio_dev *idev = pf->indio_dev;
5691     struct at91_adc_state *st = iio_priv(idev);
5692     + struct iio_chan_spec const *chan;
5693     int i, j = 0;
5694    
5695     for (i = 0; i < idev->masklength; i++) {
5696     if (!test_bit(i, idev->active_scan_mask))
5697     continue;
5698     - st->buffer[j] = at91_adc_readl(st, AT91_ADC_CHAN(st, i));
5699     + chan = idev->channels + i;
5700     + st->buffer[j] = at91_adc_readl(st, AT91_ADC_CHAN(st, chan->channel));
5701     j++;
5702     }
5703    
5704     @@ -279,6 +281,8 @@ static void handle_adc_eoc_trigger(int irq, struct iio_dev *idev)
5705     iio_trigger_poll(idev->trig);
5706     } else {
5707     st->last_value = at91_adc_readl(st, AT91_ADC_CHAN(st, st->chnb));
5708     + /* Needed to ACK the DRDY interruption */
5709     + at91_adc_readl(st, AT91_ADC_LCDR);
5710     st->done = true;
5711     wake_up_interruptible(&st->wq_data_avail);
5712     }
5713     diff --git a/drivers/iio/adc/fsl-imx25-gcq.c b/drivers/iio/adc/fsl-imx25-gcq.c
5714     index ea264fa9e567..929c617db364 100644
5715     --- a/drivers/iio/adc/fsl-imx25-gcq.c
5716     +++ b/drivers/iio/adc/fsl-imx25-gcq.c
5717     @@ -209,12 +209,14 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev,
5718     ret = of_property_read_u32(child, "reg", &reg);
5719     if (ret) {
5720     dev_err(dev, "Failed to get reg property\n");
5721     + of_node_put(child);
5722     return ret;
5723     }
5724    
5725     if (reg >= MX25_NUM_CFGS) {
5726     dev_err(dev,
5727     "reg value is greater than the number of available configuration registers\n");
5728     + of_node_put(child);
5729     return -EINVAL;
5730     }
5731    
5732     @@ -228,6 +230,7 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev,
5733     if (IS_ERR(priv->vref[refp])) {
5734     dev_err(dev, "Error, trying to use external voltage reference without a vref-%s regulator.",
5735     mx25_gcq_refp_names[refp]);
5736     + of_node_put(child);
5737     return PTR_ERR(priv->vref[refp]);
5738     }
5739     priv->channel_vref_mv[reg] =
5740     @@ -240,6 +243,7 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev,
5741     break;
5742     default:
5743     dev_err(dev, "Invalid positive reference %d\n", refp);
5744     + of_node_put(child);
5745     return -EINVAL;
5746     }
5747    
5748     @@ -254,10 +258,12 @@ static int mx25_gcq_setup_cfgs(struct platform_device *pdev,
5749    
5750     if ((refp & MX25_ADCQ_CFG_REFP_MASK) != refp) {
5751     dev_err(dev, "Invalid fsl,adc-refp property value\n");
5752     + of_node_put(child);
5753     return -EINVAL;
5754     }
5755     if ((refn & MX25_ADCQ_CFG_REFN_MASK) != refn) {
5756     dev_err(dev, "Invalid fsl,adc-refn property value\n");
5757     + of_node_put(child);
5758     return -EINVAL;
5759     }
5760    
5761     diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
5762     index bf4fc40ec84d..2f98cb2a3b96 100644
5763     --- a/drivers/iio/dac/ad5064.c
5764     +++ b/drivers/iio/dac/ad5064.c
5765     @@ -808,6 +808,40 @@ static int ad5064_set_config(struct ad5064_state *st, unsigned int val)
5766     return ad5064_write(st, cmd, 0, val, 0);
5767     }
5768    
5769     +static int ad5064_request_vref(struct ad5064_state *st, struct device *dev)
5770     +{
5771     + unsigned int i;
5772     + int ret;
5773     +
5774     + for (i = 0; i < ad5064_num_vref(st); ++i)
5775     + st->vref_reg[i].supply = ad5064_vref_name(st, i);
5776     +
5777     + if (!st->chip_info->internal_vref)
5778     + return devm_regulator_bulk_get(dev, ad5064_num_vref(st),
5779     + st->vref_reg);
5780     +
5781     + /*
5782     + * This assumes that when the regulator has an internal VREF
5783     + * there is only one external VREF connection, which is
5784     + * currently the case for all supported devices.
5785     + */
5786     + st->vref_reg[0].consumer = devm_regulator_get_optional(dev, "vref");
5787     + if (!IS_ERR(st->vref_reg[0].consumer))
5788     + return 0;
5789     +
5790     + ret = PTR_ERR(st->vref_reg[0].consumer);
5791     + if (ret != -ENODEV)
5792     + return ret;
5793     +
5794     + /* If no external regulator was supplied use the internal VREF */
5795     + st->use_internal_vref = true;
5796     + ret = ad5064_set_config(st, AD5064_CONFIG_INT_VREF_ENABLE);
5797     + if (ret)
5798     + dev_err(dev, "Failed to enable internal vref: %d\n", ret);
5799     +
5800     + return ret;
5801     +}
5802     +
5803     static int ad5064_probe(struct device *dev, enum ad5064_type type,
5804     const char *name, ad5064_write_func write)
5805     {
5806     @@ -828,22 +862,11 @@ static int ad5064_probe(struct device *dev, enum ad5064_type type,
5807     st->dev = dev;
5808     st->write = write;
5809    
5810     - for (i = 0; i < ad5064_num_vref(st); ++i)
5811     - st->vref_reg[i].supply = ad5064_vref_name(st, i);
5812     + ret = ad5064_request_vref(st, dev);
5813     + if (ret)
5814     + return ret;
5815    
5816     - ret = devm_regulator_bulk_get(dev, ad5064_num_vref(st),
5817     - st->vref_reg);
5818     - if (ret) {
5819     - if (!st->chip_info->internal_vref)
5820     - return ret;
5821     - st->use_internal_vref = true;
5822     - ret = ad5064_set_config(st, AD5064_CONFIG_INT_VREF_ENABLE);
5823     - if (ret) {
5824     - dev_err(dev, "Failed to enable internal vref: %d\n",
5825     - ret);
5826     - return ret;
5827     - }
5828     - } else {
5829     + if (!st->use_internal_vref) {
5830     ret = regulator_bulk_enable(ad5064_num_vref(st), st->vref_reg);
5831     if (ret)
5832     return ret;
5833     diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
5834     index 6e39c27dca8e..4c533275d1f2 100644
5835     --- a/drivers/infiniband/core/cm.c
5836     +++ b/drivers/infiniband/core/cm.c
5837     @@ -3292,8 +3292,11 @@ static int cm_lap_handler(struct cm_work *work)
5838     if (ret)
5839     goto unlock;
5840    
5841     - cm_init_av_by_path(param->alternate_path, NULL, &cm_id_priv->alt_av,
5842     - cm_id_priv);
5843     + ret = cm_init_av_by_path(param->alternate_path, NULL,
5844     + &cm_id_priv->alt_av, cm_id_priv);
5845     + if (ret)
5846     + goto unlock;
5847     +
5848     cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
5849     cm_id_priv->tid = lap_msg->hdr.tid;
5850     ret = atomic_inc_and_test(&cm_id_priv->work_count);
5851     diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
5852     index 7fd14ead7b37..ace40bb98624 100644
5853     --- a/drivers/infiniband/core/sysfs.c
5854     +++ b/drivers/infiniband/core/sysfs.c
5855     @@ -512,7 +512,7 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
5856     ret = get_perf_mad(p->ibdev, p->port_num, tab_attr->attr_id, &data,
5857     40 + offset / 8, sizeof(data));
5858     if (ret < 0)
5859     - return sprintf(buf, "N/A (no PMA)\n");
5860     + return ret;
5861    
5862     switch (width) {
5863     case 4:
5864     @@ -1057,10 +1057,12 @@ static int add_port(struct ib_device *device, int port_num,
5865     goto err_put;
5866     }
5867    
5868     - p->pma_table = get_counter_table(device, port_num);
5869     - ret = sysfs_create_group(&p->kobj, p->pma_table);
5870     - if (ret)
5871     - goto err_put_gid_attrs;
5872     + if (device->process_mad) {
5873     + p->pma_table = get_counter_table(device, port_num);
5874     + ret = sysfs_create_group(&p->kobj, p->pma_table);
5875     + if (ret)
5876     + goto err_put_gid_attrs;
5877     + }
5878    
5879     p->gid_group.name = "gids";
5880     p->gid_group.attrs = alloc_group_attrs(show_port_gid, attr.gid_tbl_len);
5881     @@ -1173,7 +1175,8 @@ err_free_gid:
5882     p->gid_group.attrs = NULL;
5883    
5884     err_remove_pma:
5885     - sysfs_remove_group(&p->kobj, p->pma_table);
5886     + if (p->pma_table)
5887     + sysfs_remove_group(&p->kobj, p->pma_table);
5888    
5889     err_put_gid_attrs:
5890     kobject_put(&p->gid_attr_group->kobj);
5891     @@ -1285,7 +1288,9 @@ static void free_port_list_attributes(struct ib_device *device)
5892     kfree(port->hw_stats);
5893     free_hsag(&port->kobj, port->hw_stats_ag);
5894     }
5895     - sysfs_remove_group(p, port->pma_table);
5896     +
5897     + if (port->pma_table)
5898     + sysfs_remove_group(p, port->pma_table);
5899     sysfs_remove_group(p, &port->pkey_group);
5900     sysfs_remove_group(p, &port->gid_group);
5901     sysfs_remove_group(&port->gid_attr_group->kobj,
5902     diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
5903     index 6ad0d46ab879..249efa0a6aba 100644
5904     --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
5905     +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
5906     @@ -360,7 +360,8 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
5907     }
5908    
5909     /* Make sure the HW is stopped! */
5910     - bnxt_qplib_nq_stop_irq(nq, true);
5911     + if (nq->requested)
5912     + bnxt_qplib_nq_stop_irq(nq, true);
5913    
5914     if (nq->bar_reg_iomem)
5915     iounmap(nq->bar_reg_iomem);
5916     diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
5917     index 2852d350ada1..6637df77d236 100644
5918     --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
5919     +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
5920     @@ -309,8 +309,17 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
5921     rcfw->aeq_handler(rcfw, qp_event, qp);
5922     break;
5923     default:
5924     - /* Command Response */
5925     - spin_lock_irqsave(&cmdq->lock, flags);
5926     + /*
5927     + * Command Response
5928     + * cmdq->lock needs to be acquired to synchronize
5929     + * the command send and completion reaping. This function
5930     + * is always called with creq->lock held. Using
5931     + * the nested variant of spin_lock.
5932     + *
5933     + */
5934     +
5935     + spin_lock_irqsave_nested(&cmdq->lock, flags,
5936     + SINGLE_DEPTH_NESTING);
5937     cookie = le16_to_cpu(qp_event->cookie);
5938     mcookie = qp_event->cookie;
5939     blocked = cookie & RCFW_CMD_IS_BLOCKING;
5940     diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
5941     index e22314837645..7df4a4fe4af4 100644
5942     --- a/drivers/infiniband/hw/mlx5/mr.c
5943     +++ b/drivers/infiniband/hw/mlx5/mr.c
5944     @@ -691,7 +691,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
5945     init_completion(&ent->compl);
5946     INIT_WORK(&ent->work, cache_work_func);
5947     INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
5948     - queue_work(cache->wq, &ent->work);
5949    
5950     if (i > MR_CACHE_LAST_STD_ENTRY) {
5951     mlx5_odp_init_mr_cache_entry(ent);
5952     @@ -711,6 +710,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
5953     ent->limit = dev->mdev->profile->mr_cache[i].limit;
5954     else
5955     ent->limit = 0;
5956     + queue_work(cache->wq, &ent->work);
5957     }
5958    
5959     err = mlx5_mr_cache_debugfs_init(dev);
5960     diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
5961     index 6cba2a02d11b..d53d954ac8af 100644
5962     --- a/drivers/infiniband/hw/mlx5/qp.c
5963     +++ b/drivers/infiniband/hw/mlx5/qp.c
5964     @@ -3243,7 +3243,9 @@ static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new
5965     int req = IB_QP_STATE;
5966     int opt = 0;
5967    
5968     - if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
5969     + if (new_state == IB_QPS_RESET) {
5970     + return is_valid_mask(attr_mask, req, opt);
5971     + } else if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
5972     req |= IB_QP_PKEY_INDEX | IB_QP_PORT;
5973     return is_valid_mask(attr_mask, req, opt);
5974     } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
5975     diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
5976     index aa5833318372..fc6c880756da 100644
5977     --- a/drivers/infiniband/sw/rxe/rxe_resp.c
5978     +++ b/drivers/infiniband/sw/rxe/rxe_resp.c
5979     @@ -682,6 +682,7 @@ static enum resp_states read_reply(struct rxe_qp *qp,
5980     rxe_advance_resp_resource(qp);
5981    
5982     res->type = RXE_READ_MASK;
5983     + res->replay = 0;
5984    
5985     res->read.va = qp->resp.va;
5986     res->read.va_org = qp->resp.va;
5987     @@ -752,7 +753,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
5988     state = RESPST_DONE;
5989     } else {
5990     qp->resp.res = NULL;
5991     - qp->resp.opcode = -1;
5992     + if (!res->replay)
5993     + qp->resp.opcode = -1;
5994     if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
5995     qp->resp.psn = res->cur_psn;
5996     state = RESPST_CLEANUP;
5997     @@ -814,6 +816,7 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
5998    
5999     /* next expected psn, read handles this separately */
6000     qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
6001     + qp->resp.ack_psn = qp->resp.psn;
6002    
6003     qp->resp.opcode = pkt->opcode;
6004     qp->resp.status = IB_WC_SUCCESS;
6005     @@ -1065,7 +1068,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
6006     struct rxe_pkt_info *pkt)
6007     {
6008     enum resp_states rc;
6009     - u32 prev_psn = (qp->resp.psn - 1) & BTH_PSN_MASK;
6010     + u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;
6011    
6012     if (pkt->mask & RXE_SEND_MASK ||
6013     pkt->mask & RXE_WRITE_MASK) {
6014     @@ -1108,6 +1111,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
6015     res->state = (pkt->psn == res->first_psn) ?
6016     rdatm_res_state_new :
6017     rdatm_res_state_replay;
6018     + res->replay = 1;
6019    
6020     /* Reset the resource, except length. */
6021     res->read.va_org = iova;
6022     diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
6023     index af1470d29391..332a16dad2a7 100644
6024     --- a/drivers/infiniband/sw/rxe/rxe_verbs.h
6025     +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
6026     @@ -171,6 +171,7 @@ enum rdatm_res_state {
6027    
6028     struct resp_res {
6029     int type;
6030     + int replay;
6031     u32 first_psn;
6032     u32 last_psn;
6033     u32 cur_psn;
6034     @@ -195,6 +196,7 @@ struct rxe_resp_info {
6035     enum rxe_qp_state state;
6036     u32 msn;
6037     u32 psn;
6038     + u32 ack_psn;
6039     int opcode;
6040     int drop_msg;
6041     int goto_error;
6042     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
6043     index 3d5424f335cb..0428e01e8f69 100644
6044     --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
6045     +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
6046     @@ -1438,11 +1438,15 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
6047     spin_unlock_irqrestore(&priv->lock, flags);
6048     netif_tx_unlock_bh(dev);
6049    
6050     - if (skb->protocol == htons(ETH_P_IP))
6051     + if (skb->protocol == htons(ETH_P_IP)) {
6052     + memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
6053     icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
6054     + }
6055     #if IS_ENABLED(CONFIG_IPV6)
6056     - else if (skb->protocol == htons(ETH_P_IPV6))
6057     + else if (skb->protocol == htons(ETH_P_IPV6)) {
6058     + memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
6059     icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
6060     + }
6061     #endif
6062     dev_kfree_skb_any(skb);
6063    
6064     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
6065     index e3d28f9ad9c0..30f840f874b3 100644
6066     --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
6067     +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
6068     @@ -1880,6 +1880,8 @@ static int ipoib_parent_init(struct net_device *ndev)
6069     sizeof(union ib_gid));
6070    
6071     SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent);
6072     + priv->dev->dev_port = priv->port - 1;
6073     + /* Let's set this one too for backwards compatibility. */
6074     priv->dev->dev_id = priv->port - 1;
6075    
6076     return 0;
6077     diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
6078     index fd1b80ef9490..e7cbf4fcf61d 100644
6079     --- a/drivers/iommu/arm-smmu.c
6080     +++ b/drivers/iommu/arm-smmu.c
6081     @@ -469,6 +469,9 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
6082     bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
6083     void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
6084    
6085     + if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
6086     + wmb();
6087     +
6088     if (stage1) {
6089     reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
6090    
6091     @@ -510,6 +513,9 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
6092     struct arm_smmu_domain *smmu_domain = cookie;
6093     void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);
6094    
6095     + if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
6096     + wmb();
6097     +
6098     writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
6099     }
6100    
6101     diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
6102     index b1b47a40a278..faa7d61b9d6c 100644
6103     --- a/drivers/irqchip/qcom-pdc.c
6104     +++ b/drivers/irqchip/qcom-pdc.c
6105     @@ -124,6 +124,7 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
6106     break;
6107     case IRQ_TYPE_EDGE_BOTH:
6108     pdc_type = PDC_EDGE_DUAL;
6109     + type = IRQ_TYPE_EDGE_RISING;
6110     break;
6111     case IRQ_TYPE_LEVEL_HIGH:
6112     pdc_type = PDC_LEVEL_HIGH;
6113     diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
6114     index 00984b486fea..2940cdc87af1 100644
6115     --- a/drivers/lightnvm/pblk-core.c
6116     +++ b/drivers/lightnvm/pblk-core.c
6117     @@ -1539,13 +1539,14 @@ struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
6118     struct pblk_line *cur, *new = NULL;
6119     unsigned int left_seblks;
6120    
6121     - cur = l_mg->data_line;
6122     new = l_mg->data_next;
6123     if (!new)
6124     goto out;
6125     - l_mg->data_line = new;
6126    
6127     spin_lock(&l_mg->free_lock);
6128     + cur = l_mg->data_line;
6129     + l_mg->data_line = new;
6130     +
6131     pblk_line_setup_metadata(new, l_mg, &pblk->lm);
6132     spin_unlock(&l_mg->free_lock);
6133    
6134     diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
6135     index e232e47e1353..df75d9caec45 100644
6136     --- a/drivers/lightnvm/pblk-recovery.c
6137     +++ b/drivers/lightnvm/pblk-recovery.c
6138     @@ -956,12 +956,14 @@ next:
6139     }
6140     }
6141    
6142     - spin_lock(&l_mg->free_lock);
6143     if (!open_lines) {
6144     + spin_lock(&l_mg->free_lock);
6145     WARN_ON_ONCE(!test_and_clear_bit(meta_line,
6146     &l_mg->meta_bitmap));
6147     + spin_unlock(&l_mg->free_lock);
6148     pblk_line_replace_data(pblk);
6149     } else {
6150     + spin_lock(&l_mg->free_lock);
6151     /* Allocate next line for preparation */
6152     l_mg->data_next = pblk_line_get(pblk);
6153     if (l_mg->data_next) {
6154     @@ -969,8 +971,8 @@ next:
6155     l_mg->data_next->type = PBLK_LINETYPE_DATA;
6156     is_next = 1;
6157     }
6158     + spin_unlock(&l_mg->free_lock);
6159     }
6160     - spin_unlock(&l_mg->free_lock);
6161    
6162     if (is_next)
6163     pblk_line_erase(pblk, l_mg->data_next);
6164     diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
6165     index 9fc3dfa168b4..8d2ed510c04b 100644
6166     --- a/drivers/lightnvm/pblk-sysfs.c
6167     +++ b/drivers/lightnvm/pblk-sysfs.c
6168     @@ -262,8 +262,14 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
6169     sec_in_line = l_mg->data_line->sec_in_line;
6170     meta_weight = bitmap_weight(&l_mg->meta_bitmap,
6171     PBLK_DATA_LINES);
6172     - map_weight = bitmap_weight(l_mg->data_line->map_bitmap,
6173     +
6174     + spin_lock(&l_mg->data_line->lock);
6175     + if (l_mg->data_line->map_bitmap)
6176     + map_weight = bitmap_weight(l_mg->data_line->map_bitmap,
6177     lm->sec_per_line);
6178     + else
6179     + map_weight = 0;
6180     + spin_unlock(&l_mg->data_line->lock);
6181     }
6182     spin_unlock(&l_mg->free_lock);
6183    
6184     diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
6185     index ee774a86cf1e..879227d584e7 100644
6186     --- a/drivers/lightnvm/pblk-write.c
6187     +++ b/drivers/lightnvm/pblk-write.c
6188     @@ -417,12 +417,11 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
6189     rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
6190     }
6191    
6192     + spin_lock(&l_mg->close_lock);
6193     emeta->mem += rq_len;
6194     - if (emeta->mem >= lm->emeta_len[0]) {
6195     - spin_lock(&l_mg->close_lock);
6196     + if (emeta->mem >= lm->emeta_len[0])
6197     list_del(&meta_line->list);
6198     - spin_unlock(&l_mg->close_lock);
6199     - }
6200     + spin_unlock(&l_mg->close_lock);
6201    
6202     pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);
6203    
6204     @@ -491,14 +490,15 @@ static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
6205     struct pblk_line *meta_line;
6206    
6207     spin_lock(&l_mg->close_lock);
6208     -retry:
6209     if (list_empty(&l_mg->emeta_list)) {
6210     spin_unlock(&l_mg->close_lock);
6211     return NULL;
6212     }
6213     meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
6214     - if (meta_line->emeta->mem >= lm->emeta_len[0])
6215     - goto retry;
6216     + if (meta_line->emeta->mem >= lm->emeta_len[0]) {
6217     + spin_unlock(&l_mg->close_lock);
6218     + return NULL;
6219     + }
6220     spin_unlock(&l_mg->close_lock);
6221    
6222     if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
6223     diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
6224     index 311e91b1a14f..256f18b67e8a 100644
6225     --- a/drivers/mailbox/pcc.c
6226     +++ b/drivers/mailbox/pcc.c
6227     @@ -461,8 +461,11 @@ static int __init acpi_pcc_probe(void)
6228     count = acpi_table_parse_entries_array(ACPI_SIG_PCCT,
6229     sizeof(struct acpi_table_pcct), proc,
6230     ACPI_PCCT_TYPE_RESERVED, MAX_PCC_SUBSPACES);
6231     - if (count == 0 || count > MAX_PCC_SUBSPACES) {
6232     - pr_warn("Invalid PCCT: %d PCC subspaces\n", count);
6233     + if (count <= 0 || count > MAX_PCC_SUBSPACES) {
6234     + if (count < 0)
6235     + pr_warn("Error parsing PCC subspaces from PCCT\n");
6236     + else
6237     + pr_warn("Invalid PCCT: %d PCC subspaces\n", count);
6238     return -EINVAL;
6239     }
6240    
6241     diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
6242     index e7d4817681f2..3f4211b5cd33 100644
6243     --- a/drivers/md/bcache/btree.c
6244     +++ b/drivers/md/bcache/btree.c
6245     @@ -2434,7 +2434,7 @@ static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
6246     struct keybuf *buf = refill->buf;
6247     int ret = MAP_CONTINUE;
6248    
6249     - if (bkey_cmp(k, refill->end) >= 0) {
6250     + if (bkey_cmp(k, refill->end) > 0) {
6251     ret = MAP_DONE;
6252     goto out;
6253     }
6254     diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
6255     index 51be355a3309..22944aa7d8e5 100644
6256     --- a/drivers/md/bcache/request.c
6257     +++ b/drivers/md/bcache/request.c
6258     @@ -850,7 +850,7 @@ static void cached_dev_read_done_bh(struct closure *cl)
6259    
6260     bch_mark_cache_accounting(s->iop.c, s->d,
6261     !s->cache_missed, s->iop.bypass);
6262     - trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
6263     + trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);
6264    
6265     if (s->iop.status)
6266     continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
6267     @@ -1218,6 +1218,9 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
6268     {
6269     struct cached_dev *dc = container_of(d, struct cached_dev, disk);
6270    
6271     + if (dc->io_disable)
6272     + return -EIO;
6273     +
6274     return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
6275     }
6276    
6277     diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
6278     index 30ba9aeb5ee8..03bb5cee2b83 100644
6279     --- a/drivers/md/bcache/super.c
6280     +++ b/drivers/md/bcache/super.c
6281     @@ -643,10 +643,6 @@ static int ioctl_dev(struct block_device *b, fmode_t mode,
6282     unsigned int cmd, unsigned long arg)
6283     {
6284     struct bcache_device *d = b->bd_disk->private_data;
6285     - struct cached_dev *dc = container_of(d, struct cached_dev, disk);
6286     -
6287     - if (dc->io_disable)
6288     - return -EIO;
6289    
6290     return d->ioctl(d, mode, cmd, arg);
6291     }
6292     @@ -1152,11 +1148,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
6293     }
6294    
6295     if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
6296     - bch_sectors_dirty_init(&dc->disk);
6297     atomic_set(&dc->has_dirty, 1);
6298     bch_writeback_queue(dc);
6299     }
6300    
6301     + bch_sectors_dirty_init(&dc->disk);
6302     +
6303     bch_cached_dev_run(dc);
6304     bcache_device_link(&dc->disk, c, "bdev");
6305     atomic_inc(&c->attached_dev_nr);
6306     diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
6307     index 150cf4f4cf74..26f035a0c5b9 100644
6308     --- a/drivers/md/bcache/sysfs.c
6309     +++ b/drivers/md/bcache/sysfs.c
6310     @@ -285,6 +285,7 @@ STORE(__cached_dev)
6311     1, WRITEBACK_RATE_UPDATE_SECS_MAX);
6312     d_strtoul(writeback_rate_i_term_inverse);
6313     d_strtoul_nonzero(writeback_rate_p_term_inverse);
6314     + d_strtoul_nonzero(writeback_rate_minimum);
6315    
6316     sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);
6317    
6318     @@ -412,6 +413,7 @@ static struct attribute *bch_cached_dev_files[] = {
6319     &sysfs_writeback_rate_update_seconds,
6320     &sysfs_writeback_rate_i_term_inverse,
6321     &sysfs_writeback_rate_p_term_inverse,
6322     + &sysfs_writeback_rate_minimum,
6323     &sysfs_writeback_rate_debug,
6324     &sysfs_errors,
6325     &sysfs_io_error_limit,
6326     diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
6327     index b810ea77e6b1..f666778ad237 100644
6328     --- a/drivers/md/dm-ioctl.c
6329     +++ b/drivers/md/dm-ioctl.c
6330     @@ -1720,8 +1720,7 @@ static void free_params(struct dm_ioctl *param, size_t param_size, int param_fla
6331     }
6332    
6333     static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
6334     - int ioctl_flags,
6335     - struct dm_ioctl **param, int *param_flags)
6336     + int ioctl_flags, struct dm_ioctl **param, int *param_flags)
6337     {
6338     struct dm_ioctl *dmi;
6339     int secure_data;
6340     @@ -1762,18 +1761,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
6341    
6342     *param_flags |= DM_PARAMS_MALLOC;
6343    
6344     - if (copy_from_user(dmi, user, param_kernel->data_size))
6345     - goto bad;
6346     + /* Copy from param_kernel (which was already copied from user) */
6347     + memcpy(dmi, param_kernel, minimum_data_size);
6348    
6349     -data_copied:
6350     - /*
6351     - * Abort if something changed the ioctl data while it was being copied.
6352     - */
6353     - if (dmi->data_size != param_kernel->data_size) {
6354     - DMERR("rejecting ioctl: data size modified while processing parameters");
6355     + if (copy_from_user(&dmi->data, (char __user *)user + minimum_data_size,
6356     + param_kernel->data_size - minimum_data_size))
6357     goto bad;
6358     - }
6359     -
6360     +data_copied:
6361     /* Wipe the user buffer so we do not return it to userspace */
6362     if (secure_data && clear_user(user, param_kernel->data_size))
6363     goto bad;
6364     diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
6365     index 969954915566..fa68336560c3 100644
6366     --- a/drivers/md/dm-zoned-metadata.c
6367     +++ b/drivers/md/dm-zoned-metadata.c
6368     @@ -99,7 +99,7 @@ struct dmz_mblock {
6369     struct rb_node node;
6370     struct list_head link;
6371     sector_t no;
6372     - atomic_t ref;
6373     + unsigned int ref;
6374     unsigned long state;
6375     struct page *page;
6376     void *data;
6377     @@ -296,7 +296,7 @@ static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
6378    
6379     RB_CLEAR_NODE(&mblk->node);
6380     INIT_LIST_HEAD(&mblk->link);
6381     - atomic_set(&mblk->ref, 0);
6382     + mblk->ref = 0;
6383     mblk->state = 0;
6384     mblk->no = mblk_no;
6385     mblk->data = page_address(mblk->page);
6386     @@ -339,10 +339,11 @@ static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
6387     }
6388    
6389     /*
6390     - * Lookup a metadata block in the rbtree.
6391     + * Lookup a metadata block in the rbtree. If the block is found, increment
6392     + * its reference count.
6393     */
6394     -static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd,
6395     - sector_t mblk_no)
6396     +static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
6397     + sector_t mblk_no)
6398     {
6399     struct rb_root *root = &zmd->mblk_rbtree;
6400     struct rb_node *node = root->rb_node;
6401     @@ -350,8 +351,17 @@ static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd,
6402    
6403     while (node) {
6404     mblk = container_of(node, struct dmz_mblock, node);
6405     - if (mblk->no == mblk_no)
6406     + if (mblk->no == mblk_no) {
6407     + /*
6408     + * If this is the first reference to the block,
6409     + * remove it from the LRU list.
6410     + */
6411     + mblk->ref++;
6412     + if (mblk->ref == 1 &&
6413     + !test_bit(DMZ_META_DIRTY, &mblk->state))
6414     + list_del_init(&mblk->link);
6415     return mblk;
6416     + }
6417     node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
6418     }
6419    
6420     @@ -382,32 +392,47 @@ static void dmz_mblock_bio_end_io(struct bio *bio)
6421     }
6422    
6423     /*
6424     - * Read a metadata block from disk.
6425     + * Read an uncached metadata block from disk and add it to the cache.
6426     */
6427     -static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd,
6428     - sector_t mblk_no)
6429     +static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
6430     + sector_t mblk_no)
6431     {
6432     - struct dmz_mblock *mblk;
6433     + struct dmz_mblock *mblk, *m;
6434     sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
6435     struct bio *bio;
6436    
6437     - /* Get block and insert it */
6438     + /* Get a new block and a BIO to read it */
6439     mblk = dmz_alloc_mblock(zmd, mblk_no);
6440     if (!mblk)
6441     return NULL;
6442    
6443     - spin_lock(&zmd->mblk_lock);
6444     - atomic_inc(&mblk->ref);
6445     - set_bit(DMZ_META_READING, &mblk->state);
6446     - dmz_insert_mblock(zmd, mblk);
6447     - spin_unlock(&zmd->mblk_lock);
6448     -
6449     bio = bio_alloc(GFP_NOIO, 1);
6450     if (!bio) {
6451     dmz_free_mblock(zmd, mblk);
6452     return NULL;
6453     }
6454    
6455     + spin_lock(&zmd->mblk_lock);
6456     +
6457     + /*
6458     + * Make sure that another context did not start reading
6459     + * the block already.
6460     + */
6461     + m = dmz_get_mblock_fast(zmd, mblk_no);
6462     + if (m) {
6463     + spin_unlock(&zmd->mblk_lock);
6464     + dmz_free_mblock(zmd, mblk);
6465     + bio_put(bio);
6466     + return m;
6467     + }
6468     +
6469     + mblk->ref++;
6470     + set_bit(DMZ_META_READING, &mblk->state);
6471     + dmz_insert_mblock(zmd, mblk);
6472     +
6473     + spin_unlock(&zmd->mblk_lock);
6474     +
6475     + /* Submit read BIO */
6476     bio->bi_iter.bi_sector = dmz_blk2sect(block);
6477     bio_set_dev(bio, zmd->dev->bdev);
6478     bio->bi_private = mblk;
6479     @@ -484,7 +509,8 @@ static void dmz_release_mblock(struct dmz_metadata *zmd,
6480    
6481     spin_lock(&zmd->mblk_lock);
6482    
6483     - if (atomic_dec_and_test(&mblk->ref)) {
6484     + mblk->ref--;
6485     + if (mblk->ref == 0) {
6486     if (test_bit(DMZ_META_ERROR, &mblk->state)) {
6487     rb_erase(&mblk->node, &zmd->mblk_rbtree);
6488     dmz_free_mblock(zmd, mblk);
6489     @@ -508,18 +534,12 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
6490    
6491     /* Check rbtree */
6492     spin_lock(&zmd->mblk_lock);
6493     - mblk = dmz_lookup_mblock(zmd, mblk_no);
6494     - if (mblk) {
6495     - /* Cache hit: remove block from LRU list */
6496     - if (atomic_inc_return(&mblk->ref) == 1 &&
6497     - !test_bit(DMZ_META_DIRTY, &mblk->state))
6498     - list_del_init(&mblk->link);
6499     - }
6500     + mblk = dmz_get_mblock_fast(zmd, mblk_no);
6501     spin_unlock(&zmd->mblk_lock);
6502    
6503     if (!mblk) {
6504     /* Cache miss: read the block from disk */
6505     - mblk = dmz_fetch_mblock(zmd, mblk_no);
6506     + mblk = dmz_get_mblock_slow(zmd, mblk_no);
6507     if (!mblk)
6508     return ERR_PTR(-ENOMEM);
6509     }
6510     @@ -753,7 +773,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
6511    
6512     spin_lock(&zmd->mblk_lock);
6513     clear_bit(DMZ_META_DIRTY, &mblk->state);
6514     - if (atomic_read(&mblk->ref) == 0)
6515     + if (mblk->ref == 0)
6516     list_add_tail(&mblk->link, &zmd->mblk_lru_list);
6517     spin_unlock(&zmd->mblk_lock);
6518     }
6519     @@ -2308,7 +2328,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
6520     mblk = list_first_entry(&zmd->mblk_dirty_list,
6521     struct dmz_mblock, link);
6522     dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
6523     - (u64)mblk->no, atomic_read(&mblk->ref));
6524     + (u64)mblk->no, mblk->ref);
6525     list_del_init(&mblk->link);
6526     rb_erase(&mblk->node, &zmd->mblk_rbtree);
6527     dmz_free_mblock(zmd, mblk);
6528     @@ -2326,8 +2346,8 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
6529     root = &zmd->mblk_rbtree;
6530     rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
6531     dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
6532     - (u64)mblk->no, atomic_read(&mblk->ref));
6533     - atomic_set(&mblk->ref, 0);
6534     + (u64)mblk->no, mblk->ref);
6535     + mblk->ref = 0;
6536     dmz_free_mblock(zmd, mblk);
6537     }
6538    
6539     diff --git a/drivers/md/md.c b/drivers/md/md.c
6540     index 63ceabb4e020..8668793262d0 100644
6541     --- a/drivers/md/md.c
6542     +++ b/drivers/md/md.c
6543     @@ -452,10 +452,11 @@ static void md_end_flush(struct bio *fbio)
6544     rdev_dec_pending(rdev, mddev);
6545    
6546     if (atomic_dec_and_test(&fi->flush_pending)) {
6547     - if (bio->bi_iter.bi_size == 0)
6548     + if (bio->bi_iter.bi_size == 0) {
6549     /* an empty barrier - all done */
6550     bio_endio(bio);
6551     - else {
6552     + mempool_free(fi, mddev->flush_pool);
6553     + } else {
6554     INIT_WORK(&fi->flush_work, submit_flushes);
6555     queue_work(md_wq, &fi->flush_work);
6556     }
6557     @@ -509,10 +510,11 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
6558     rcu_read_unlock();
6559    
6560     if (atomic_dec_and_test(&fi->flush_pending)) {
6561     - if (bio->bi_iter.bi_size == 0)
6562     + if (bio->bi_iter.bi_size == 0) {
6563     /* an empty barrier - all done */
6564     bio_endio(bio);
6565     - else {
6566     + mempool_free(fi, mddev->flush_pool);
6567     + } else {
6568     INIT_WORK(&fi->flush_work, submit_flushes);
6569     queue_work(md_wq, &fi->flush_work);
6570     }
6571     @@ -5904,14 +5906,6 @@ static void __md_stop(struct mddev *mddev)
6572     mddev->to_remove = &md_redundancy_group;
6573     module_put(pers->owner);
6574     clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
6575     -}
6576     -
6577     -void md_stop(struct mddev *mddev)
6578     -{
6579     - /* stop the array and free an attached data structures.
6580     - * This is called from dm-raid
6581     - */
6582     - __md_stop(mddev);
6583     if (mddev->flush_bio_pool) {
6584     mempool_destroy(mddev->flush_bio_pool);
6585     mddev->flush_bio_pool = NULL;
6586     @@ -5920,6 +5914,14 @@ void md_stop(struct mddev *mddev)
6587     mempool_destroy(mddev->flush_pool);
6588     mddev->flush_pool = NULL;
6589     }
6590     +}
6591     +
6592     +void md_stop(struct mddev *mddev)
6593     +{
6594     + /* stop the array and free any attached data structures.
6595     + * This is called from dm-raid
6596     + */
6597     + __md_stop(mddev);
6598     bioset_exit(&mddev->bio_set);
6599     bioset_exit(&mddev->sync_set);
6600     }
6601     diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
6602     index 4e990246225e..1d54109071cc 100644
6603     --- a/drivers/md/raid1.c
6604     +++ b/drivers/md/raid1.c
6605     @@ -1734,6 +1734,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
6606     */
6607     if (rdev->saved_raid_disk >= 0 &&
6608     rdev->saved_raid_disk >= first &&
6609     + rdev->saved_raid_disk < conf->raid_disks &&
6610     conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
6611     first = last = rdev->saved_raid_disk;
6612    
6613     diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
6614     index d6f7978b4449..811427e53126 100644
6615     --- a/drivers/md/raid10.c
6616     +++ b/drivers/md/raid10.c
6617     @@ -1808,6 +1808,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
6618     first = last = rdev->raid_disk;
6619    
6620     if (rdev->saved_raid_disk >= first &&
6621     + rdev->saved_raid_disk < conf->geo.raid_disks &&
6622     conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
6623     mirror = rdev->saved_raid_disk;
6624     else
6625     diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
6626     index 030b2602faf0..dd8bad74a1f0 100644
6627     --- a/drivers/media/cec/cec-adap.c
6628     +++ b/drivers/media/cec/cec-adap.c
6629     @@ -341,7 +341,7 @@ static void cec_data_completed(struct cec_data *data)
6630     *
6631     * This function is called with adap->lock held.
6632     */
6633     -static void cec_data_cancel(struct cec_data *data)
6634     +static void cec_data_cancel(struct cec_data *data, u8 tx_status)
6635     {
6636     /*
6637     * It's either the current transmit, or it is a pending
6638     @@ -356,13 +356,11 @@ static void cec_data_cancel(struct cec_data *data)
6639     }
6640    
6641     if (data->msg.tx_status & CEC_TX_STATUS_OK) {
6642     - /* Mark the canceled RX as a timeout */
6643     data->msg.rx_ts = ktime_get_ns();
6644     - data->msg.rx_status = CEC_RX_STATUS_TIMEOUT;
6645     + data->msg.rx_status = CEC_RX_STATUS_ABORTED;
6646     } else {
6647     - /* Mark the canceled TX as an error */
6648     data->msg.tx_ts = ktime_get_ns();
6649     - data->msg.tx_status |= CEC_TX_STATUS_ERROR |
6650     + data->msg.tx_status |= tx_status |
6651     CEC_TX_STATUS_MAX_RETRIES;
6652     data->msg.tx_error_cnt++;
6653     data->attempts = 0;
6654     @@ -390,15 +388,15 @@ static void cec_flush(struct cec_adapter *adap)
6655     while (!list_empty(&adap->transmit_queue)) {
6656     data = list_first_entry(&adap->transmit_queue,
6657     struct cec_data, list);
6658     - cec_data_cancel(data);
6659     + cec_data_cancel(data, CEC_TX_STATUS_ABORTED);
6660     }
6661     if (adap->transmitting)
6662     - cec_data_cancel(adap->transmitting);
6663     + cec_data_cancel(adap->transmitting, CEC_TX_STATUS_ABORTED);
6664    
6665     /* Cancel the pending timeout work. */
6666     list_for_each_entry_safe(data, n, &adap->wait_queue, list) {
6667     if (cancel_delayed_work(&data->work))
6668     - cec_data_cancel(data);
6669     + cec_data_cancel(data, CEC_TX_STATUS_OK);
6670     /*
6671     * If cancel_delayed_work returned false, then
6672     * the cec_wait_timeout function is running,
6673     @@ -474,12 +472,13 @@ int cec_thread_func(void *_adap)
6674     * so much traffic on the bus that the adapter was
6675     * unable to transmit for CEC_XFER_TIMEOUT_MS (2.1s).
6676     */
6677     - dprintk(1, "%s: message %*ph timed out\n", __func__,
6678     + pr_warn("cec-%s: message %*ph timed out\n", adap->name,
6679     adap->transmitting->msg.len,
6680     adap->transmitting->msg.msg);
6681     adap->tx_timeouts++;
6682     /* Just give up on this. */
6683     - cec_data_cancel(adap->transmitting);
6684     + cec_data_cancel(adap->transmitting,
6685     + CEC_TX_STATUS_TIMEOUT);
6686     goto unlock;
6687     }
6688    
6689     @@ -514,9 +513,11 @@ int cec_thread_func(void *_adap)
6690     if (data->attempts) {
6691     /* should be >= 3 data bit periods for a retry */
6692     signal_free_time = CEC_SIGNAL_FREE_TIME_RETRY;
6693     - } else if (data->new_initiator) {
6694     + } else if (adap->last_initiator !=
6695     + cec_msg_initiator(&data->msg)) {
6696     /* should be >= 5 data bit periods for new initiator */
6697     signal_free_time = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
6698     + adap->last_initiator = cec_msg_initiator(&data->msg);
6699     } else {
6700     /*
6701     * should be >= 7 data bit periods for sending another
6702     @@ -530,7 +531,7 @@ int cec_thread_func(void *_adap)
6703     /* Tell the adapter to transmit, cancel on error */
6704     if (adap->ops->adap_transmit(adap, data->attempts,
6705     signal_free_time, &data->msg))
6706     - cec_data_cancel(data);
6707     + cec_data_cancel(data, CEC_TX_STATUS_ABORTED);
6708    
6709     unlock:
6710     mutex_unlock(&adap->lock);
6711     @@ -701,9 +702,6 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
6712     struct cec_fh *fh, bool block)
6713     {
6714     struct cec_data *data;
6715     - u8 last_initiator = 0xff;
6716     - unsigned int timeout;
6717     - int res = 0;
6718    
6719     msg->rx_ts = 0;
6720     msg->tx_ts = 0;
6721     @@ -813,23 +811,6 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
6722     data->adap = adap;
6723     data->blocking = block;
6724    
6725     - /*
6726     - * Determine if this message follows a message from the same
6727     - * initiator. Needed to determine the free signal time later on.
6728     - */
6729     - if (msg->len > 1) {
6730     - if (!(list_empty(&adap->transmit_queue))) {
6731     - const struct cec_data *last;
6732     -
6733     - last = list_last_entry(&adap->transmit_queue,
6734     - const struct cec_data, list);
6735     - last_initiator = cec_msg_initiator(&last->msg);
6736     - } else if (adap->transmitting) {
6737     - last_initiator =
6738     - cec_msg_initiator(&adap->transmitting->msg);
6739     - }
6740     - }
6741     - data->new_initiator = last_initiator != cec_msg_initiator(msg);
6742     init_completion(&data->c);
6743     INIT_DELAYED_WORK(&data->work, cec_wait_timeout);
6744    
6745     @@ -845,48 +826,23 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
6746     if (!block)
6747     return 0;
6748    
6749     - /*
6750     - * If we don't get a completion before this time something is really
6751     - * wrong and we time out.
6752     - */
6753     - timeout = CEC_XFER_TIMEOUT_MS;
6754     - /* Add the requested timeout if we have to wait for a reply as well */
6755     - if (msg->timeout)
6756     - timeout += msg->timeout;
6757     -
6758     /*
6759     * Release the lock and wait, retake the lock afterwards.
6760     */
6761     mutex_unlock(&adap->lock);
6762     - res = wait_for_completion_killable_timeout(&data->c,
6763     - msecs_to_jiffies(timeout));
6764     + wait_for_completion_killable(&data->c);
6765     + if (!data->completed)
6766     + cancel_delayed_work_sync(&data->work);
6767     mutex_lock(&adap->lock);
6768    
6769     - if (data->completed) {
6770     - /* The transmit completed (possibly with an error) */
6771     - *msg = data->msg;
6772     - kfree(data);
6773     - return 0;
6774     - }
6775     - /*
6776     - * The wait for completion timed out or was interrupted, so mark this
6777     - * as non-blocking and disconnect from the filehandle since it is
6778     - * still 'in flight'. When it finally completes it will just drop the
6779     - * result silently.
6780     - */
6781     - data->blocking = false;
6782     - if (data->fh)
6783     - list_del(&data->xfer_list);
6784     - data->fh = NULL;
6785     + /* Cancel the transmit if it was interrupted */
6786     + if (!data->completed)
6787     + cec_data_cancel(data, CEC_TX_STATUS_ABORTED);
6788    
6789     - if (res == 0) { /* timed out */
6790     - /* Check if the reply or the transmit failed */
6791     - if (msg->timeout && (msg->tx_status & CEC_TX_STATUS_OK))
6792     - msg->rx_status = CEC_RX_STATUS_TIMEOUT;
6793     - else
6794     - msg->tx_status = CEC_TX_STATUS_MAX_RETRIES;
6795     - }
6796     - return res > 0 ? 0 : res;
6797     + /* The transmit completed (possibly with an error) */
6798     + *msg = data->msg;
6799     + kfree(data);
6800     + return 0;
6801     }
6802    
6803     /* Helper function to be used by drivers and this framework. */
6804     @@ -1044,6 +1000,8 @@ void cec_received_msg_ts(struct cec_adapter *adap,
6805     mutex_lock(&adap->lock);
6806     dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg);
6807    
6808     + adap->last_initiator = 0xff;
6809     +
6810     /* Check if this message was for us (directed or broadcast). */
6811     if (!cec_msg_is_broadcast(msg))
6812     valid_la = cec_has_log_addr(adap, msg_dest);
6813     @@ -1506,6 +1464,8 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
6814     }
6815    
6816     mutex_lock(&adap->devnode.lock);
6817     + adap->last_initiator = 0xff;
6818     +
6819     if ((adap->needs_hpd || list_empty(&adap->devnode.fhs)) &&
6820     adap->ops->adap_enable(adap, true)) {
6821     mutex_unlock(&adap->devnode.lock);
6822     diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
6823     index b6536bbad530..4961573850d5 100644
6824     --- a/drivers/media/cec/cec-api.c
6825     +++ b/drivers/media/cec/cec-api.c
6826     @@ -101,6 +101,23 @@ static long cec_adap_g_phys_addr(struct cec_adapter *adap,
6827     return 0;
6828     }
6829    
6830     +static int cec_validate_phys_addr(u16 phys_addr)
6831     +{
6832     + int i;
6833     +
6834     + if (phys_addr == CEC_PHYS_ADDR_INVALID)
6835     + return 0;
6836     + for (i = 0; i < 16; i += 4)
6837     + if (phys_addr & (0xf << i))
6838     + break;
6839     + if (i == 16)
6840     + return 0;
6841     + for (i += 4; i < 16; i += 4)
6842     + if ((phys_addr & (0xf << i)) == 0)
6843     + return -EINVAL;
6844     + return 0;
6845     +}
6846     +
6847     static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
6848     bool block, __u16 __user *parg)
6849     {
6850     @@ -112,7 +129,7 @@ static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
6851     if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
6852     return -EFAULT;
6853    
6854     - err = cec_phys_addr_validate(phys_addr, NULL, NULL);
6855     + err = cec_validate_phys_addr(phys_addr);
6856     if (err)
6857     return err;
6858     mutex_lock(&adap->lock);
6859     diff --git a/drivers/media/cec/cec-edid.c b/drivers/media/cec/cec-edid.c
6860     index ec72ac1c0b91..f587e8eaefd8 100644
6861     --- a/drivers/media/cec/cec-edid.c
6862     +++ b/drivers/media/cec/cec-edid.c
6863     @@ -10,66 +10,6 @@
6864     #include <linux/types.h>
6865     #include <media/cec.h>
6866    
6867     -/*
6868     - * This EDID is expected to be a CEA-861 compliant, which means that there are
6869     - * at least two blocks and one or more of the extensions blocks are CEA-861
6870     - * blocks.
6871     - *
6872     - * The returned location is guaranteed to be < size - 1.
6873     - */
6874     -static unsigned int cec_get_edid_spa_location(const u8 *edid, unsigned int size)
6875     -{
6876     - unsigned int blocks = size / 128;
6877     - unsigned int block;
6878     - u8 d;
6879     -
6880     - /* Sanity check: at least 2 blocks and a multiple of the block size */
6881     - if (blocks < 2 || size % 128)
6882     - return 0;
6883     -
6884     - /*
6885     - * If there are fewer extension blocks than the size, then update
6886     - * 'blocks'. It is allowed to have more extension blocks than the size,
6887     - * since some hardware can only read e.g. 256 bytes of the EDID, even
6888     - * though more blocks are present. The first CEA-861 extension block
6889     - * should normally be in block 1 anyway.
6890     - */
6891     - if (edid[0x7e] + 1 < blocks)
6892     - blocks = edid[0x7e] + 1;
6893     -
6894     - for (block = 1; block < blocks; block++) {
6895     - unsigned int offset = block * 128;
6896     -
6897     - /* Skip any non-CEA-861 extension blocks */
6898     - if (edid[offset] != 0x02 || edid[offset + 1] != 0x03)
6899     - continue;
6900     -
6901     - /* search Vendor Specific Data Block (tag 3) */
6902     - d = edid[offset + 2] & 0x7f;
6903     - /* Check if there are Data Blocks */
6904     - if (d <= 4)
6905     - continue;
6906     - if (d > 4) {
6907     - unsigned int i = offset + 4;
6908     - unsigned int end = offset + d;
6909     -
6910     - /* Note: 'end' is always < 'size' */
6911     - do {
6912     - u8 tag = edid[i] >> 5;
6913     - u8 len = edid[i] & 0x1f;
6914     -
6915     - if (tag == 3 && len >= 5 && i + len <= end &&
6916     - edid[i + 1] == 0x03 &&
6917     - edid[i + 2] == 0x0c &&
6918     - edid[i + 3] == 0x00)
6919     - return i + 4;
6920     - i += len + 1;
6921     - } while (i < end);
6922     - }
6923     - }
6924     - return 0;
6925     -}
6926     -
6927     u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
6928     unsigned int *offset)
6929     {
6930     diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c
6931     index 3a3dc23c560c..a4341205c197 100644
6932     --- a/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c
6933     +++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c
6934     @@ -602,14 +602,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE
6935     [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][5] = { 3138, 657, 810 },
6936     [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][6] = { 731, 680, 3048 },
6937     [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SRGB][7] = { 800, 799, 800 },
6938     - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
6939     - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][1] = { 3046, 3054, 886 },
6940     - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][2] = { 0, 3058, 3031 },
6941     - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][3] = { 360, 3079, 877 },
6942     - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][4] = { 3103, 587, 3027 },
6943     - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][5] = { 3116, 723, 861 },
6944     - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][6] = { 789, 744, 3025 },
6945     - [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
6946     + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 },
6947     + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][1] = { 3046, 3054, 886 },
6948     + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][2] = { 0, 3058, 3031 },
6949     + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][3] = { 360, 3079, 877 },
6950     + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][4] = { 3103, 587, 3027 },
6951     + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][5] = { 3116, 723, 861 },
6952     + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][6] = { 789, 744, 3025 },
6953     + [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 },
6954     [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
6955     [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2941, 2950, 546 },
6956     [V4L2_COLORSPACE_SMPTE170M][V4L2_XFER_FUNC_SMPTE240M][2] = { 0, 2954, 2924 },
6957     @@ -658,14 +658,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE
6958     [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][5] = { 3138, 657, 810 },
6959     [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][6] = { 731, 680, 3048 },
6960     [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SRGB][7] = { 800, 799, 800 },
6961     - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
6962     - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][1] = { 3046, 3054, 886 },
6963     - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][2] = { 0, 3058, 3031 },
6964     - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][3] = { 360, 3079, 877 },
6965     - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][4] = { 3103, 587, 3027 },
6966     - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][5] = { 3116, 723, 861 },
6967     - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][6] = { 789, 744, 3025 },
6968     - [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
6969     + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 },
6970     + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][1] = { 3046, 3054, 886 },
6971     + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][2] = { 0, 3058, 3031 },
6972     + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][3] = { 360, 3079, 877 },
6973     + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][4] = { 3103, 587, 3027 },
6974     + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][5] = { 3116, 723, 861 },
6975     + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][6] = { 789, 744, 3025 },
6976     + [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 },
6977     [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
6978     [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2941, 2950, 546 },
6979     [V4L2_COLORSPACE_SMPTE240M][V4L2_XFER_FUNC_SMPTE240M][2] = { 0, 2954, 2924 },
6980     @@ -714,14 +714,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE
6981     [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][5] = { 3056, 800, 800 },
6982     [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3056 },
6983     [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 },
6984     - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
6985     - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 851 },
6986     - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][2] = { 851, 3033, 3033 },
6987     - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][3] = { 851, 3033, 851 },
6988     - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][4] = { 3033, 851, 3033 },
6989     - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][5] = { 3033, 851, 851 },
6990     - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 3033 },
6991     - [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
6992     + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 },
6993     + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][1] = { 3033, 3033, 851 },
6994     + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][2] = { 851, 3033, 3033 },
6995     + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][3] = { 851, 3033, 851 },
6996     + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][4] = { 3033, 851, 3033 },
6997     + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][5] = { 3033, 851, 851 },
6998     + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][6] = { 851, 851, 3033 },
6999     + [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 },
7000     [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
7001     [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 507 },
7002     [V4L2_COLORSPACE_REC709][V4L2_XFER_FUNC_SMPTE240M][2] = { 507, 2926, 2926 },
7003     @@ -770,14 +770,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE
7004     [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][5] = { 2599, 901, 909 },
7005     [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][6] = { 991, 0, 2966 },
7006     [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SRGB][7] = { 800, 799, 800 },
7007     - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
7008     - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][1] = { 2989, 3120, 1180 },
7009     - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][2] = { 1913, 3011, 3009 },
7010     - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][3] = { 1836, 3099, 1105 },
7011     - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][4] = { 2627, 413, 2966 },
7012     - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][5] = { 2576, 943, 951 },
7013     - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][6] = { 1026, 0, 2942 },
7014     - [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
7015     + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 },
7016     + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][1] = { 2989, 3120, 1180 },
7017     + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][2] = { 1913, 3011, 3009 },
7018     + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][3] = { 1836, 3099, 1105 },
7019     + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][4] = { 2627, 413, 2966 },
7020     + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][5] = { 2576, 943, 951 },
7021     + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][6] = { 1026, 0, 2942 },
7022     + [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 },
7023     [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
7024     [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][1] = { 2879, 3022, 874 },
7025     [V4L2_COLORSPACE_470_SYSTEM_M][V4L2_XFER_FUNC_SMPTE240M][2] = { 1688, 2903, 2901 },
7026     @@ -826,14 +826,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE
7027     [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][5] = { 3001, 800, 799 },
7028     [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3071 },
7029     [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 799 },
7030     - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
7031     - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 776 },
7032     - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][2] = { 1068, 3033, 3033 },
7033     - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][3] = { 1068, 3033, 776 },
7034     - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][4] = { 2977, 851, 3048 },
7035     - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][5] = { 2977, 851, 851 },
7036     - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 3048 },
7037     - [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
7038     + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 },
7039     + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][1] = { 3033, 3033, 776 },
7040     + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][2] = { 1068, 3033, 3033 },
7041     + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][3] = { 1068, 3033, 776 },
7042     + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][4] = { 2977, 851, 3048 },
7043     + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][5] = { 2977, 851, 851 },
7044     + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][6] = { 851, 851, 3048 },
7045     + [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 },
7046     [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
7047     [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 423 },
7048     [V4L2_COLORSPACE_470_SYSTEM_BG][V4L2_XFER_FUNC_SMPTE240M][2] = { 749, 2926, 2926 },
7049     @@ -882,14 +882,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE
7050     [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][5] = { 3056, 800, 800 },
7051     [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3056 },
7052     [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 },
7053     - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
7054     - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 851 },
7055     - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][2] = { 851, 3033, 3033 },
7056     - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][3] = { 851, 3033, 851 },
7057     - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][4] = { 3033, 851, 3033 },
7058     - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][5] = { 3033, 851, 851 },
7059     - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 3033 },
7060     - [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
7061     + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 },
7062     + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][1] = { 3033, 3033, 851 },
7063     + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][2] = { 851, 3033, 3033 },
7064     + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][3] = { 851, 3033, 851 },
7065     + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][4] = { 3033, 851, 3033 },
7066     + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][5] = { 3033, 851, 851 },
7067     + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][6] = { 851, 851, 3033 },
7068     + [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 },
7069     [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
7070     [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 507 },
7071     [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE240M][2] = { 507, 2926, 2926 },
7072     @@ -922,62 +922,62 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE
7073     [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 1812, 886, 886 },
7074     [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1812 },
7075     [V4L2_COLORSPACE_SRGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 },
7076     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
7077     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 781 },
7078     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][2] = { 1622, 2939, 2939 },
7079     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][3] = { 1622, 2939, 781 },
7080     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][4] = { 2502, 547, 2881 },
7081     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][5] = { 2502, 547, 547 },
7082     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][6] = { 547, 547, 2881 },
7083     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
7084     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 },
7085     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][1] = { 3056, 3056, 1031 },
7086     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][2] = { 1838, 3056, 3056 },
7087     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][3] = { 1838, 3056, 1031 },
7088     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][4] = { 2657, 800, 3002 },
7089     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][5] = { 2657, 800, 800 },
7090     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3002 },
7091     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 },
7092     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
7093     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][1] = { 3033, 3033, 1063 },
7094     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][2] = { 1828, 3033, 3033 },
7095     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][3] = { 1828, 3033, 1063 },
7096     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][4] = { 2633, 851, 2979 },
7097     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][5] = { 2633, 851, 851 },
7098     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][6] = { 851, 851, 2979 },
7099     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
7100     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
7101     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 744 },
7102     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][2] = { 1594, 2926, 2926 },
7103     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][3] = { 1594, 2926, 744 },
7104     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][4] = { 2484, 507, 2867 },
7105     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][5] = { 2484, 507, 507 },
7106     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][6] = { 507, 507, 2867 },
7107     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
7108     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
7109     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][1] = { 2125, 2125, 212 },
7110     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][2] = { 698, 2125, 2125 },
7111     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][3] = { 698, 2125, 212 },
7112     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][4] = { 1557, 130, 2043 },
7113     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][5] = { 1557, 130, 130 },
7114     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2043 },
7115     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
7116     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
7117     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1308 },
7118     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][2] = { 2069, 3175, 3175 },
7119     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][3] = { 2069, 3175, 1308 },
7120     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][4] = { 2816, 1084, 3127 },
7121     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][5] = { 2816, 1084, 1084 },
7122     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3127 },
7123     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
7124     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 },
7125     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 1812, 1812, 1022 },
7126     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 1402, 1812, 1812 },
7127     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 1402, 1812, 1022 },
7128     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 1692, 886, 1797 },
7129     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 1692, 886, 886 },
7130     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1797 },
7131     - [V4L2_COLORSPACE_ADOBERGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 },
7132     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
7133     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][1] = { 2939, 2939, 781 },
7134     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][2] = { 1622, 2939, 2939 },
7135     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][3] = { 1622, 2939, 781 },
7136     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][4] = { 2502, 547, 2881 },
7137     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][5] = { 2502, 547, 547 },
7138     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][6] = { 547, 547, 2881 },
7139     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_709][7] = { 547, 547, 547 },
7140     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][0] = { 3056, 3056, 3056 },
7141     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][1] = { 3056, 3056, 1031 },
7142     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][2] = { 1838, 3056, 3056 },
7143     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][3] = { 1838, 3056, 1031 },
7144     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][4] = { 2657, 800, 3002 },
7145     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][5] = { 2657, 800, 800 },
7146     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][6] = { 800, 800, 3002 },
7147     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 },
7148     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 },
7149     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][1] = { 3033, 3033, 1063 },
7150     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][2] = { 1828, 3033, 3033 },
7151     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][3] = { 1828, 3033, 1063 },
7152     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][4] = { 2633, 851, 2979 },
7153     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][5] = { 2633, 851, 851 },
7154     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][6] = { 851, 851, 2979 },
7155     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 },
7156     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
7157     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][1] = { 2926, 2926, 744 },
7158     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][2] = { 1594, 2926, 2926 },
7159     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][3] = { 1594, 2926, 744 },
7160     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][4] = { 2484, 507, 2867 },
7161     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][5] = { 2484, 507, 507 },
7162     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][6] = { 507, 507, 2867 },
7163     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE240M][7] = { 507, 507, 507 },
7164     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][0] = { 2125, 2125, 2125 },
7165     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][1] = { 2125, 2125, 212 },
7166     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][2] = { 698, 2125, 2125 },
7167     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][3] = { 698, 2125, 212 },
7168     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][4] = { 1557, 130, 2043 },
7169     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][5] = { 1557, 130, 130 },
7170     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][6] = { 130, 130, 2043 },
7171     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_NONE][7] = { 130, 130, 130 },
7172     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][0] = { 3175, 3175, 3175 },
7173     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][1] = { 3175, 3175, 1308 },
7174     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][2] = { 2069, 3175, 3175 },
7175     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][3] = { 2069, 3175, 1308 },
7176     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][4] = { 2816, 1084, 3127 },
7177     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][5] = { 2816, 1084, 1084 },
7178     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][6] = { 1084, 1084, 3127 },
7179     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_DCI_P3][7] = { 1084, 1084, 1084 },
7180     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][0] = { 1812, 1812, 1812 },
7181     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][1] = { 1812, 1812, 1022 },
7182     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][2] = { 1402, 1812, 1812 },
7183     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][3] = { 1402, 1812, 1022 },
7184     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][4] = { 1692, 886, 1797 },
7185     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][5] = { 1692, 886, 886 },
7186     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][6] = { 886, 886, 1797 },
7187     + [V4L2_COLORSPACE_OPRGB][V4L2_XFER_FUNC_SMPTE2084][7] = { 886, 886, 886 },
7188     [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][0] = { 2939, 2939, 2939 },
7189     [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][1] = { 2877, 2923, 1058 },
7190     [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_709][2] = { 1837, 2840, 2916 },
7191     @@ -994,14 +994,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE
7192     [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][5] = { 2517, 1159, 900 },
7193     [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][6] = { 1042, 870, 2917 },
7194     [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 800 },
7195     - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
7196     - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][1] = { 2976, 3018, 1315 },
7197     - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][2] = { 2024, 2942, 3011 },
7198     - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][3] = { 1930, 2926, 1256 },
7199     - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][4] = { 2563, 1227, 2916 },
7200     - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][5] = { 2494, 1183, 943 },
7201     - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][6] = { 1073, 916, 2894 },
7202     - [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
7203     + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 },
7204     + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][1] = { 2976, 3018, 1315 },
7205     + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][2] = { 2024, 2942, 3011 },
7206     + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][3] = { 1930, 2926, 1256 },
7207     + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][4] = { 2563, 1227, 2916 },
7208     + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][5] = { 2494, 1183, 943 },
7209     + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][6] = { 1073, 916, 2894 },
7210     + [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 },
7211     [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
7212     [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][1] = { 2864, 2910, 1024 },
7213     [V4L2_COLORSPACE_BT2020][V4L2_XFER_FUNC_SMPTE240M][2] = { 1811, 2826, 2903 },
7214     @@ -1050,14 +1050,14 @@ const struct tpg_rbg_color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1][V4L2_XFE
7215     [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][5] = { 2880, 998, 902 },
7216     [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][6] = { 816, 823, 2940 },
7217     [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SRGB][7] = { 800, 800, 799 },
7218     - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][0] = { 3033, 3033, 3033 },
7219     - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][1] = { 3029, 3028, 1255 },
7220     - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][2] = { 1406, 2988, 3011 },
7221     - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][3] = { 1398, 2983, 1190 },
7222     - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][4] = { 2860, 1050, 2939 },
7223     - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][5] = { 2857, 1033, 945 },
7224     - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][6] = { 866, 873, 2916 },
7225     - [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_ADOBERGB][7] = { 851, 851, 851 },
7226     + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][0] = { 3033, 3033, 3033 },
7227     + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][1] = { 3029, 3028, 1255 },
7228     + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][2] = { 1406, 2988, 3011 },
7229     + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][3] = { 1398, 2983, 1190 },
7230     + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][4] = { 2860, 1050, 2939 },
7231     + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][5] = { 2857, 1033, 945 },
7232     + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][6] = { 866, 873, 2916 },
7233     + [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_OPRGB][7] = { 851, 851, 851 },
7234     [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][0] = { 2926, 2926, 2926 },
7235     [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][1] = { 2923, 2921, 957 },
7236     [V4L2_COLORSPACE_DCI_P3][V4L2_XFER_FUNC_SMPTE240M][2] = { 1125, 2877, 2902 },
7237     @@ -1128,7 +1128,7 @@ static const double rec709_to_240m[3][3] = {
7238     { 0.0016327, 0.0044133, 0.9939540 },
7239     };
7240    
7241     -static const double rec709_to_adobergb[3][3] = {
7242     +static const double rec709_to_oprgb[3][3] = {
7243     { 0.7151627, 0.2848373, -0.0000000 },
7244     { 0.0000000, 1.0000000, 0.0000000 },
7245     { -0.0000000, 0.0411705, 0.9588295 },
7246     @@ -1195,7 +1195,7 @@ static double transfer_rec709_to_rgb(double v)
7247     return (v < 0.081) ? v / 4.5 : pow((v + 0.099) / 1.099, 1.0 / 0.45);
7248     }
7249    
7250     -static double transfer_rgb_to_adobergb(double v)
7251     +static double transfer_rgb_to_oprgb(double v)
7252     {
7253     return pow(v, 1.0 / 2.19921875);
7254     }
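
Editor's note on the hunk just above: the renamed helper encodes linear RGB with a pure power law whose exponent is 1/2.19921875 (2.19921875 = 563/256, the opRGB/AdobeRGB encoding gamma). The standalone sketch below is not the kernel helper; it pairs that forward transfer with an assumed inverse purely to show the round trip.

```c
/* Round-trip sketch of the opRGB transfer function from the hunk above.
 * transfer_rgb_to_oprgb() mirrors the renamed helper; the inverse is my
 * own addition for the demo. Build with: cc oprgb.c -lm
 */
#include <math.h>
#include <stdio.h>

static double transfer_rgb_to_oprgb(double v)   /* linear -> encoded */
{
	return pow(v, 1.0 / 2.19921875);
}

static double transfer_oprgb_to_rgb(double v)   /* encoded -> linear (assumed inverse) */
{
	return pow(v, 2.19921875);
}

int main(void)
{
	double linear = 0.18;   /* arbitrary mid-grey test value */
	double enc = transfer_rgb_to_oprgb(linear);

	printf("linear %.4f -> encoded %.4f -> back %.4f\n",
	       linear, enc, transfer_oprgb_to_rgb(enc));
	return 0;
}
```

The 0.18 test value comes back unchanged to within rounding, which is all the sketch is meant to show.
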
7255     @@ -1251,8 +1251,8 @@ static void csc(enum v4l2_colorspace colorspace, enum v4l2_xfer_func xfer_func,
7256     case V4L2_COLORSPACE_470_SYSTEM_M:
7257     mult_matrix(r, g, b, rec709_to_ntsc1953);
7258     break;
7259     - case V4L2_COLORSPACE_ADOBERGB:
7260     - mult_matrix(r, g, b, rec709_to_adobergb);
7261     + case V4L2_COLORSPACE_OPRGB:
7262     + mult_matrix(r, g, b, rec709_to_oprgb);
7263     break;
7264     case V4L2_COLORSPACE_BT2020:
7265     mult_matrix(r, g, b, rec709_to_bt2020);
7266     @@ -1284,10 +1284,10 @@ static void csc(enum v4l2_colorspace colorspace, enum v4l2_xfer_func xfer_func,
7267     *g = transfer_rgb_to_srgb(*g);
7268     *b = transfer_rgb_to_srgb(*b);
7269     break;
7270     - case V4L2_XFER_FUNC_ADOBERGB:
7271     - *r = transfer_rgb_to_adobergb(*r);
7272     - *g = transfer_rgb_to_adobergb(*g);
7273     - *b = transfer_rgb_to_adobergb(*b);
7274     + case V4L2_XFER_FUNC_OPRGB:
7275     + *r = transfer_rgb_to_oprgb(*r);
7276     + *g = transfer_rgb_to_oprgb(*g);
7277     + *b = transfer_rgb_to_oprgb(*b);
7278     break;
7279     case V4L2_XFER_FUNC_DCI_P3:
7280     *r = transfer_rgb_to_dcip3(*r);
7281     @@ -1321,7 +1321,7 @@ int main(int argc, char **argv)
7282     V4L2_COLORSPACE_470_SYSTEM_BG,
7283     0,
7284     V4L2_COLORSPACE_SRGB,
7285     - V4L2_COLORSPACE_ADOBERGB,
7286     + V4L2_COLORSPACE_OPRGB,
7287     V4L2_COLORSPACE_BT2020,
7288     0,
7289     V4L2_COLORSPACE_DCI_P3,
7290     @@ -1336,7 +1336,7 @@ int main(int argc, char **argv)
7291     "V4L2_COLORSPACE_470_SYSTEM_BG",
7292     "",
7293     "V4L2_COLORSPACE_SRGB",
7294     - "V4L2_COLORSPACE_ADOBERGB",
7295     + "V4L2_COLORSPACE_OPRGB",
7296     "V4L2_COLORSPACE_BT2020",
7297     "",
7298     "V4L2_COLORSPACE_DCI_P3",
7299     @@ -1345,7 +1345,7 @@ int main(int argc, char **argv)
7300     "",
7301     "V4L2_XFER_FUNC_709",
7302     "V4L2_XFER_FUNC_SRGB",
7303     - "V4L2_XFER_FUNC_ADOBERGB",
7304     + "V4L2_XFER_FUNC_OPRGB",
7305     "V4L2_XFER_FUNC_SMPTE240M",
7306     "V4L2_XFER_FUNC_NONE",
7307     "V4L2_XFER_FUNC_DCI_P3",
7308     diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
7309     index abd4c788dffd..f40ab5704bf0 100644
7310     --- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
7311     +++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
7312     @@ -1770,7 +1770,7 @@ typedef struct { u16 __; u8 _; } __packed x24;
7313     pos[7] = (chr & (0x01 << 0) ? fg : bg); \
7314     } \
7315     \
7316     - pos += (tpg->hflip ? -8 : 8) / hdiv; \
7317     + pos += (tpg->hflip ? -8 : 8) / (int)hdiv; \
7318     } \
7319     } \
7320     } while (0)
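
A note on why the `(int)hdiv` cast above is the whole fix: when `hflip` is set the step is -8, and dividing a negative `int` by an unsigned divisor promotes the negative operand to unsigned, so the pointer would jump forward by a huge amount instead of stepping back. A minimal userspace illustration (the variable names are stand-ins, not the tpg macro itself):

```c
/* Shows the C promotion rule the cast works around: int / unsigned int is
 * evaluated as unsigned, so -8 first wraps to a very large value.
 */
#include <stdio.h>

int main(void)
{
	int step = -8;          /* hflip case: walk backwards through the line */
	unsigned int hdiv = 2;  /* stand-in for the horizontal divider */

	printf("unsigned division: %u\n", step / hdiv);       /* wraps to ~2^31 */
	printf("signed division:   %d\n", step / (int)hdiv);  /* expected: -4 */
	return 0;
}
```
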
7321     diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
7322     index 55c2ea0720d9..f3899cc84e27 100644
7323     --- a/drivers/media/i2c/adv7511.c
7324     +++ b/drivers/media/i2c/adv7511.c
7325     @@ -1355,10 +1355,10 @@ static int adv7511_set_fmt(struct v4l2_subdev *sd,
7326     state->xfer_func = format->format.xfer_func;
7327    
7328     switch (format->format.colorspace) {
7329     - case V4L2_COLORSPACE_ADOBERGB:
7330     + case V4L2_COLORSPACE_OPRGB:
7331     c = HDMI_COLORIMETRY_EXTENDED;
7332     - ec = y ? HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601 :
7333     - HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB;
7334     + ec = y ? HDMI_EXTENDED_COLORIMETRY_OPYCC_601 :
7335     + HDMI_EXTENDED_COLORIMETRY_OPRGB;
7336     break;
7337     case V4L2_COLORSPACE_SMPTE170M:
7338     c = y ? HDMI_COLORIMETRY_ITU_601 : HDMI_COLORIMETRY_NONE;
7339     diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
7340     index 668be2bca57a..c78698199ac5 100644
7341     --- a/drivers/media/i2c/adv7604.c
7342     +++ b/drivers/media/i2c/adv7604.c
7343     @@ -2284,8 +2284,10 @@ static int adv76xx_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
7344     state->aspect_ratio.numerator = 16;
7345     state->aspect_ratio.denominator = 9;
7346    
7347     - if (!state->edid.present)
7348     + if (!state->edid.present) {
7349     state->edid.blocks = 0;
7350     + cec_phys_addr_invalidate(state->cec_adap);
7351     + }
7352    
7353     v4l2_dbg(2, debug, sd, "%s: clear EDID pad %d, edid.present = 0x%x\n",
7354     __func__, edid->pad, state->edid.present);
7355     @@ -2474,7 +2476,7 @@ static int adv76xx_log_status(struct v4l2_subdev *sd)
7356     "YCbCr Bt.601 (16-235)", "YCbCr Bt.709 (16-235)",
7357     "xvYCC Bt.601", "xvYCC Bt.709",
7358     "YCbCr Bt.601 (0-255)", "YCbCr Bt.709 (0-255)",
7359     - "sYCC", "Adobe YCC 601", "AdobeRGB", "invalid", "invalid",
7360     + "sYCC", "opYCC 601", "opRGB", "invalid", "invalid",
7361     "invalid", "invalid", "invalid"
7362     };
7363     static const char * const rgb_quantization_range_txt[] = {
7364     diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
7365     index 4f8fbdd00e35..71fe56565f75 100644
7366     --- a/drivers/media/i2c/adv7842.c
7367     +++ b/drivers/media/i2c/adv7842.c
7368     @@ -786,8 +786,10 @@ static int edid_write_hdmi_segment(struct v4l2_subdev *sd, u8 port)
7369     /* Disable I2C access to internal EDID ram from HDMI DDC ports */
7370     rep_write_and_or(sd, 0x77, 0xf3, 0x00);
7371    
7372     - if (!state->hdmi_edid.present)
7373     + if (!state->hdmi_edid.present) {
7374     + cec_phys_addr_invalidate(state->cec_adap);
7375     return 0;
7376     + }
7377    
7378     pa = cec_get_edid_phys_addr(edid, 256, &spa_loc);
7379     err = cec_phys_addr_validate(pa, &pa, NULL);
7380     diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
7381     index 31bf577b0bd3..64d1402882c8 100644
7382     --- a/drivers/media/i2c/ov7670.c
7383     +++ b/drivers/media/i2c/ov7670.c
7384     @@ -1808,17 +1808,24 @@ static int ov7670_probe(struct i2c_client *client,
7385     info->pclk_hb_disable = true;
7386     }
7387    
7388     - info->clk = devm_clk_get(&client->dev, "xclk");
7389     - if (IS_ERR(info->clk))
7390     - return PTR_ERR(info->clk);
7391     - ret = clk_prepare_enable(info->clk);
7392     - if (ret)
7393     - return ret;
7394     + info->clk = devm_clk_get(&client->dev, "xclk"); /* optional */
7395     + if (IS_ERR(info->clk)) {
7396     + ret = PTR_ERR(info->clk);
7397     + if (ret == -ENOENT)
7398     + info->clk = NULL;
7399     + else
7400     + return ret;
7401     + }
7402     + if (info->clk) {
7403     + ret = clk_prepare_enable(info->clk);
7404     + if (ret)
7405     + return ret;
7406    
7407     - info->clock_speed = clk_get_rate(info->clk) / 1000000;
7408     - if (info->clock_speed < 10 || info->clock_speed > 48) {
7409     - ret = -EINVAL;
7410     - goto clk_disable;
7411     + info->clock_speed = clk_get_rate(info->clk) / 1000000;
7412     + if (info->clock_speed < 10 || info->clock_speed > 48) {
7413     + ret = -EINVAL;
7414     + goto clk_disable;
7415     + }
7416     }
7417    
7418     ret = ov7670_init_gpio(client, info);
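
The ov7670 hunk above turns the "xclk" clock into an optional resource: -ENOENT from devm_clk_get() now means "no clock wired up, continue with info->clk == NULL", any other error still aborts the probe, and the rate check only runs when a clock actually exists. A small userspace sketch of that decision, with the lookup result simulated by plain errno values (handle_clk_lookup() is illustrative, not a kernel API):

```c
/* Optional-resource pattern: -ENOENT means "absent, carry on",
 * everything else is a hard probe failure.
 */
#include <errno.h>
#include <stdio.h>

static int handle_clk_lookup(int lookup_err, int *have_clk)
{
	if (lookup_err) {
		if (lookup_err == -ENOENT) {
			*have_clk = 0;      /* optional clock absent: not an error */
			return 0;
		}
		return lookup_err;          /* real failure: propagate */
	}
	*have_clk = 1;                      /* clock present: caller enables it and checks the rate */
	return 0;
}

int main(void)
{
	int have;

	printf("present: ret=%d have=%d\n", handle_clk_lookup(0, &have), have);
	printf("absent:  ret=%d have=%d\n", handle_clk_lookup(-ENOENT, &have), have);
	printf("broken:  ret=%d\n", handle_clk_lookup(-EIO, &have));
	return 0;
}
```
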
7419     diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
7420     index 44c41933415a..ff25ea9aca48 100644
7421     --- a/drivers/media/i2c/tc358743.c
7422     +++ b/drivers/media/i2c/tc358743.c
7423     @@ -1243,9 +1243,9 @@ static int tc358743_log_status(struct v4l2_subdev *sd)
7424     u8 vi_status3 = i2c_rd8(sd, VI_STATUS3);
7425     const int deep_color_mode[4] = { 8, 10, 12, 16 };
7426     static const char * const input_color_space[] = {
7427     - "RGB", "YCbCr 601", "Adobe RGB", "YCbCr 709", "NA (4)",
7428     + "RGB", "YCbCr 601", "opRGB", "YCbCr 709", "NA (4)",
7429     "xvYCC 601", "NA(6)", "xvYCC 709", "NA(8)", "sYCC601",
7430     - "NA(10)", "NA(11)", "NA(12)", "Adobe YCC 601"};
7431     + "NA(10)", "NA(11)", "NA(12)", "opYCC 601"};
7432    
7433     v4l2_info(sd, "-----Chip status-----\n");
7434     v4l2_info(sd, "Chip ID: 0x%02x\n",
7435     diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
7436     index 76e6bed5a1da..805bd9c65940 100644
7437     --- a/drivers/media/i2c/tvp5150.c
7438     +++ b/drivers/media/i2c/tvp5150.c
7439     @@ -1534,7 +1534,7 @@ static int tvp5150_probe(struct i2c_client *c,
7440     27000000, 1, 27000000);
7441     v4l2_ctrl_new_std_menu_items(&core->hdl, &tvp5150_ctrl_ops,
7442     V4L2_CID_TEST_PATTERN,
7443     - ARRAY_SIZE(tvp5150_test_patterns),
7444     + ARRAY_SIZE(tvp5150_test_patterns) - 1,
7445     0, 0, tvp5150_test_patterns);
7446     sd->ctrl_handler = &core->hdl;
7447     if (core->hdl.error) {
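
On the tvp5150 hunk above: the `max` argument of v4l2_ctrl_new_std_menu_items() is the highest valid menu index, so passing ARRAY_SIZE(tvp5150_test_patterns) declared one menu entry more than the table actually holds; the "- 1" is the entire fix. A trivial standalone reminder of the count-versus-last-index distinction (the strings here are illustrative only):

```c
/* Count of entries vs. highest valid index; the control framework wants
 * the latter, hence the "- 1" in the hunk above.
 */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	const char *test_patterns[] = { "pattern A", "pattern B" };

	printf("entries   = %zu\n", ARRAY_SIZE(test_patterns));
	printf("max index = %zu\n", ARRAY_SIZE(test_patterns) - 1);
	return 0;
}
```
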
7448     diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
7449     index 477c80a4d44c..cd4c8230563c 100644
7450     --- a/drivers/media/platform/vivid/vivid-core.h
7451     +++ b/drivers/media/platform/vivid/vivid-core.h
7452     @@ -111,7 +111,7 @@ enum vivid_colorspace {
7453     VIVID_CS_170M,
7454     VIVID_CS_709,
7455     VIVID_CS_SRGB,
7456     - VIVID_CS_ADOBERGB,
7457     + VIVID_CS_OPRGB,
7458     VIVID_CS_2020,
7459     VIVID_CS_DCI_P3,
7460     VIVID_CS_240M,
7461     diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
7462     index 5429193fbb91..999aa101b150 100644
7463     --- a/drivers/media/platform/vivid/vivid-ctrls.c
7464     +++ b/drivers/media/platform/vivid/vivid-ctrls.c
7465     @@ -348,7 +348,7 @@ static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl)
7466     V4L2_COLORSPACE_SMPTE170M,
7467     V4L2_COLORSPACE_REC709,
7468     V4L2_COLORSPACE_SRGB,
7469     - V4L2_COLORSPACE_ADOBERGB,
7470     + V4L2_COLORSPACE_OPRGB,
7471     V4L2_COLORSPACE_BT2020,
7472     V4L2_COLORSPACE_DCI_P3,
7473     V4L2_COLORSPACE_SMPTE240M,
7474     @@ -729,7 +729,7 @@ static const char * const vivid_ctrl_colorspace_strings[] = {
7475     "SMPTE 170M",
7476     "Rec. 709",
7477     "sRGB",
7478     - "AdobeRGB",
7479     + "opRGB",
7480     "BT.2020",
7481     "DCI-P3",
7482     "SMPTE 240M",
7483     @@ -752,7 +752,7 @@ static const char * const vivid_ctrl_xfer_func_strings[] = {
7484     "Default",
7485     "Rec. 709",
7486     "sRGB",
7487     - "AdobeRGB",
7488     + "opRGB",
7489     "SMPTE 240M",
7490     "None",
7491     "DCI-P3",
7492     diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
7493     index 51fec66d8d45..50248e2176a0 100644
7494     --- a/drivers/media/platform/vivid/vivid-vid-out.c
7495     +++ b/drivers/media/platform/vivid/vivid-vid-out.c
7496     @@ -413,7 +413,7 @@ int vivid_try_fmt_vid_out(struct file *file, void *priv,
7497     mp->colorspace = V4L2_COLORSPACE_SMPTE170M;
7498     } else if (mp->colorspace != V4L2_COLORSPACE_SMPTE170M &&
7499     mp->colorspace != V4L2_COLORSPACE_REC709 &&
7500     - mp->colorspace != V4L2_COLORSPACE_ADOBERGB &&
7501     + mp->colorspace != V4L2_COLORSPACE_OPRGB &&
7502     mp->colorspace != V4L2_COLORSPACE_BT2020 &&
7503     mp->colorspace != V4L2_COLORSPACE_SRGB) {
7504     mp->colorspace = V4L2_COLORSPACE_REC709;
7505     diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
7506     index 1aa88d94e57f..e28bd8836751 100644
7507     --- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
7508     +++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
7509     @@ -31,6 +31,7 @@ MODULE_PARM_DESC(disable_rc, "Disable inbuilt IR receiver.");
7510     DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
7511    
7512     struct dvbsky_state {
7513     + struct mutex stream_mutex;
7514     u8 ibuf[DVBSKY_BUF_LEN];
7515     u8 obuf[DVBSKY_BUF_LEN];
7516     u8 last_lock;
7517     @@ -67,17 +68,18 @@ static int dvbsky_usb_generic_rw(struct dvb_usb_device *d,
7518    
7519     static int dvbsky_stream_ctrl(struct dvb_usb_device *d, u8 onoff)
7520     {
7521     + struct dvbsky_state *state = d_to_priv(d);
7522     int ret;
7523     - static u8 obuf_pre[3] = { 0x37, 0, 0 };
7524     - static u8 obuf_post[3] = { 0x36, 3, 0 };
7525     + u8 obuf_pre[3] = { 0x37, 0, 0 };
7526     + u8 obuf_post[3] = { 0x36, 3, 0 };
7527    
7528     - mutex_lock(&d->usb_mutex);
7529     - ret = dvb_usbv2_generic_rw_locked(d, obuf_pre, 3, NULL, 0);
7530     + mutex_lock(&state->stream_mutex);
7531     + ret = dvbsky_usb_generic_rw(d, obuf_pre, 3, NULL, 0);
7532     if (!ret && onoff) {
7533     msleep(20);
7534     - ret = dvb_usbv2_generic_rw_locked(d, obuf_post, 3, NULL, 0);
7535     + ret = dvbsky_usb_generic_rw(d, obuf_post, 3, NULL, 0);
7536     }
7537     - mutex_unlock(&d->usb_mutex);
7538     + mutex_unlock(&state->stream_mutex);
7539     return ret;
7540     }
7541    
7542     @@ -606,6 +608,8 @@ static int dvbsky_init(struct dvb_usb_device *d)
7543     if (ret)
7544     return ret;
7545     */
7546     + mutex_init(&state->stream_mutex);
7547     +
7548     state->last_lock = 0;
7549    
7550     return 0;
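
Two things change together in the dvbsky hunk above: streaming control now takes a dedicated per-state stream_mutex instead of d->usb_mutex, and the command buffers lose their `static` qualifier so every call builds its command in its own storage. The sketch below (generic userspace C, nothing dvbsky-specific) shows the C property the second change relies on: a `static` local is one buffer shared by every caller, so two devices preparing commands concurrently end up looking at the same bytes.

```c
/* A static local is a single shared buffer; an ordinary local lives on
 * each caller's stack. This demo only illustrates the sharing, it is not
 * the driver's code.
 */
#include <stdio.h>

static const char *fill_static(int dev)
{
	static char buf[16];                       /* shared across all calls */

	snprintf(buf, sizeof(buf), "cmd-for-dev%d", dev);
	return buf;                                /* same storage every time */
}

int main(void)
{
	const char *a = fill_static(0);
	const char *b = fill_static(1);            /* overwrites what 'a' points at */

	printf("a=%s b=%s (both point at the one static buffer)\n", a, b);
	return 0;
}
```
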
7551     diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
7552     index 71c829f31d3b..87b887b7604e 100644
7553     --- a/drivers/media/usb/em28xx/em28xx-cards.c
7554     +++ b/drivers/media/usb/em28xx/em28xx-cards.c
7555     @@ -2141,13 +2141,13 @@ const struct em28xx_board em28xx_boards[] = {
7556     .input = { {
7557     .type = EM28XX_VMUX_COMPOSITE,
7558     .vmux = TVP5150_COMPOSITE1,
7559     - .amux = EM28XX_AUDIO_SRC_LINE,
7560     + .amux = EM28XX_AMUX_LINE_IN,
7561     .gpio = terratec_av350_unmute_gpio,
7562    
7563     }, {
7564     .type = EM28XX_VMUX_SVIDEO,
7565     .vmux = TVP5150_SVIDEO,
7566     - .amux = EM28XX_AUDIO_SRC_LINE,
7567     + .amux = EM28XX_AMUX_LINE_IN,
7568     .gpio = terratec_av350_unmute_gpio,
7569     } },
7570     },
7571     @@ -3039,6 +3039,9 @@ static int em28xx_hint_board(struct em28xx *dev)
7572    
7573     static void em28xx_card_setup(struct em28xx *dev)
7574     {
7575     + int i, j, idx;
7576     + bool duplicate_entry;
7577     +
7578     /*
7579     * If the device can be a webcam, seek for a sensor.
7580     * If sensor is not found, then it isn't a webcam.
7581     @@ -3195,6 +3198,32 @@ static void em28xx_card_setup(struct em28xx *dev)
7582     /* Allow override tuner type by a module parameter */
7583     if (tuner >= 0)
7584     dev->tuner_type = tuner;
7585     +
7586     + /*
7587     + * Dynamically generate a list of valid audio inputs for this
7588     + * specific board, mapping them via enum em28xx_amux.
7589     + */
7590     +
7591     + idx = 0;
7592     + for (i = 0; i < MAX_EM28XX_INPUT; i++) {
7593     + if (!INPUT(i)->type)
7594     + continue;
7595     +
7596     + /* Skip already mapped audio inputs */
7597     + duplicate_entry = false;
7598     + for (j = 0; j < idx; j++) {
7599     + if (INPUT(i)->amux == dev->amux_map[j]) {
7600     + duplicate_entry = true;
7601     + break;
7602     + }
7603     + }
7604     + if (duplicate_entry)
7605     + continue;
7606     +
7607     + dev->amux_map[idx++] = INPUT(i)->amux;
7608     + }
7609     + for (; idx < MAX_EM28XX_INPUT; idx++)
7610     + dev->amux_map[idx] = EM28XX_AMUX_UNUSED;
7611     }
7612    
7613     void em28xx_setup_xc3028(struct em28xx *dev, struct xc2028_ctrl *ctl)
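
The em28xx_card_setup() hunk above builds dev->amux_map by walking the board's inputs, recording each distinct audio mux once and padding the rest of the table with EM28XX_AMUX_UNUSED; the enumaudio and audioset changes later in this patch then work from that table. A standalone sketch of the same dedup-and-pad walk, with made-up enum values and table size rather than the driver's real definitions:

```c
/* Build a deduplicated mux map from a board's input list, padding unused
 * slots with a sentinel. Enum values and sizes are illustrative only.
 */
#include <stdio.h>

#define MAX_INPUT 4
enum amux { AMUX_UNUSED = -1, AMUX_VIDEO = 0, AMUX_LINE_IN = 1 };

int main(void)
{
	/* two board inputs that happen to share the same audio mux */
	enum amux inputs[MAX_INPUT] = { AMUX_LINE_IN, AMUX_LINE_IN, AMUX_VIDEO, AMUX_UNUSED };
	enum amux map[MAX_INPUT];
	int i, j, idx = 0;

	for (i = 0; i < MAX_INPUT; i++) {
		int duplicate = 0;

		if (inputs[i] == AMUX_UNUSED)      /* unused board input slot */
			continue;
		for (j = 0; j < idx; j++) {        /* skip muxes already mapped */
			if (inputs[i] == map[j]) {
				duplicate = 1;
				break;
			}
		}
		if (!duplicate)
			map[idx++] = inputs[i];
	}
	for (; idx < MAX_INPUT; idx++)             /* pad with the sentinel */
		map[idx] = AMUX_UNUSED;

	for (i = 0; i < MAX_INPUT; i++)
		printf("map[%d] = %d\n", i, map[i]);
	return 0;
}
```
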
7614     diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
7615     index 68571bf36d28..3bf98ac897ec 100644
7616     --- a/drivers/media/usb/em28xx/em28xx-video.c
7617     +++ b/drivers/media/usb/em28xx/em28xx-video.c
7618     @@ -1093,6 +1093,8 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
7619    
7620     em28xx_videodbg("%s\n", __func__);
7621    
7622     + dev->v4l2->field_count = 0;
7623     +
7624     /*
7625     * Make sure streaming is not already in progress for this type
7626     * of filehandle (e.g. video, vbi)
7627     @@ -1471,9 +1473,9 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
7628    
7629     fmt = format_by_fourcc(f->fmt.pix.pixelformat);
7630     if (!fmt) {
7631     - em28xx_videodbg("Fourcc format (%08x) invalid.\n",
7632     - f->fmt.pix.pixelformat);
7633     - return -EINVAL;
7634     + fmt = &format[0];
7635     + em28xx_videodbg("Fourcc format (%08x) invalid. Using default (%08x).\n",
7636     + f->fmt.pix.pixelformat, fmt->fourcc);
7637     }
7638    
7639     if (dev->board.is_em2800) {
7640     @@ -1666,6 +1668,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
7641     {
7642     struct em28xx *dev = video_drvdata(file);
7643     unsigned int n;
7644     + int j;
7645    
7646     n = i->index;
7647     if (n >= MAX_EM28XX_INPUT)
7648     @@ -1685,6 +1688,12 @@ static int vidioc_enum_input(struct file *file, void *priv,
7649     if (dev->is_webcam)
7650     i->capabilities = 0;
7651    
7652     + /* Dynamically generates an audioset bitmask */
7653     + i->audioset = 0;
7654     + for (j = 0; j < MAX_EM28XX_INPUT; j++)
7655     + if (dev->amux_map[j] != EM28XX_AMUX_UNUSED)
7656     + i->audioset |= 1 << j;
7657     +
7658     return 0;
7659     }
7660    
7661     @@ -1710,11 +1719,24 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
7662     return 0;
7663     }
7664    
7665     -static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
7666     +static int em28xx_fill_audio_input(struct em28xx *dev,
7667     + const char *s,
7668     + struct v4l2_audio *a,
7669     + unsigned int index)
7670     {
7671     - struct em28xx *dev = video_drvdata(file);
7672     + unsigned int idx = dev->amux_map[index];
7673     +
7674     + /*
7675     + * With msp3400, almost all mappings use the default (amux = 0).
7676     + * The only one may use a different value is WinTV USB2, where it
7677     + * can also be SCART1 input.
7678     + * As it is very doubtful that we would see new boards with msp3400,
7679     + * let's just reuse the existing switch.
7680     + */
7681     + if (dev->has_msp34xx && idx != EM28XX_AMUX_UNUSED)
7682     + idx = EM28XX_AMUX_LINE_IN;
7683    
7684     - switch (a->index) {
7685     + switch (idx) {
7686     case EM28XX_AMUX_VIDEO:
7687     strcpy(a->name, "Television");
7688     break;
7689     @@ -1739,32 +1761,79 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
7690     case EM28XX_AMUX_PCM_OUT:
7691     strcpy(a->name, "PCM");
7692     break;
7693     + case EM28XX_AMUX_UNUSED:
7694     default:
7695     return -EINVAL;
7696     }
7697     -
7698     - a->index = dev->ctl_ainput;
7699     + a->index = index;
7700     a->capability = V4L2_AUDCAP_STEREO;
7701    
7702     + em28xx_videodbg("%s: audio input index %d is '%s'\n",
7703     + s, a->index, a->name);
7704     +
7705     return 0;
7706     }
7707    
7708     +static int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *a)
7709     +{
7710     + struct em28xx *dev = video_drvdata(file);
7711     +
7712     + if (a->index >= MAX_EM28XX_INPUT)
7713     + return -EINVAL;
7714     +
7715     + return em28xx_fill_audio_input(dev, __func__, a, a->index);
7716     +}
7717     +
7718     +static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
7719     +{
7720     + struct em28xx *dev = video_drvdata(file);
7721     + int i;
7722     +
7723     + for (i = 0; i < MAX_EM28XX_INPUT; i++)
7724     + if (dev->ctl_ainput == dev->amux_map[i])
7725     + return em28xx_fill_audio_input(dev, __func__, a, i);
7726     +
7727     + /* Should never happen! */
7728     + return -EINVAL;
7729     +}
7730     +
7731     static int vidioc_s_audio(struct file *file, void *priv,
7732     const struct v4l2_audio *a)
7733     {
7734     struct em28xx *dev = video_drvdata(file);
7735     + int idx, i;
7736    
7737     if (a->index >= MAX_EM28XX_INPUT)
7738     return -EINVAL;
7739     - if (!INPUT(a->index)->type)
7740     +
7741     + idx = dev->amux_map[a->index];
7742     +
7743     + if (idx == EM28XX_AMUX_UNUSED)
7744     return -EINVAL;
7745    
7746     - dev->ctl_ainput = INPUT(a->index)->amux;
7747     - dev->ctl_aoutput = INPUT(a->index)->aout;
7748     + dev->ctl_ainput = idx;
7749     +
7750     + /*
7751     + * FIXME: This is wrong, as different inputs at em28xx_cards
7752     + * may have different audio outputs. So, the right thing
7753     + * to do is to implement VIDIOC_G_AUDOUT/VIDIOC_S_AUDOUT.
7754     + * With the current board definitions, this would work fine,
7755     + * as, currently, all boards fit.
7756     + */
7757     + for (i = 0; i < MAX_EM28XX_INPUT; i++)
7758     + if (idx == dev->amux_map[i])
7759     + break;
7760     + if (i == MAX_EM28XX_INPUT)
7761     + return -EINVAL;
7762     +
7763     + dev->ctl_aoutput = INPUT(i)->aout;
7764    
7765     if (!dev->ctl_aoutput)
7766     dev->ctl_aoutput = EM28XX_AOUT_MASTER;
7767    
7768     + em28xx_videodbg("%s: set audio input to %d\n", __func__,
7769     + dev->ctl_ainput);
7770     +
7771     return 0;
7772     }
7773    
7774     @@ -2302,6 +2371,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
7775     .vidioc_try_fmt_vbi_cap = vidioc_g_fmt_vbi_cap,
7776     .vidioc_s_fmt_vbi_cap = vidioc_g_fmt_vbi_cap,
7777     .vidioc_enum_framesizes = vidioc_enum_framesizes,
7778     + .vidioc_enumaudio = vidioc_enumaudio,
7779     .vidioc_g_audio = vidioc_g_audio,
7780     .vidioc_s_audio = vidioc_s_audio,
7781    
7782     diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
7783     index 953caac025f2..a551072e62ed 100644
7784     --- a/drivers/media/usb/em28xx/em28xx.h
7785     +++ b/drivers/media/usb/em28xx/em28xx.h
7786     @@ -335,6 +335,9 @@ enum em28xx_usb_audio_type {
7787     /**
7788     * em28xx_amux - describes the type of audio input used by em28xx
7789     *
7790     + * @EM28XX_AMUX_UNUSED:
7791     + * Used only on em28xx dev->map field, in order to mark an entry
7792     + * as unused.
7793     * @EM28XX_AMUX_VIDEO:
7794     * On devices without AC97, this is the only value that it is currently
7795     * allowed.
7796     @@ -369,7 +372,8 @@ enum em28xx_usb_audio_type {
7797     * same time, via the alsa mux.
7798     */
7799     enum em28xx_amux {
7800     - EM28XX_AMUX_VIDEO,
7801     + EM28XX_AMUX_UNUSED = -1,
7802     + EM28XX_AMUX_VIDEO = 0,
7803     EM28XX_AMUX_LINE_IN,
7804    
7805     /* Some less-common mixer setups */
7806     @@ -692,6 +696,8 @@ struct em28xx {
7807     unsigned int ctl_input; // selected input
7808     unsigned int ctl_ainput;// selected audio input
7809     unsigned int ctl_aoutput;// selected audio output
7810     + enum em28xx_amux amux_map[MAX_EM28XX_INPUT];
7811     +
7812     int mute;
7813     int volume;
7814    
7815     diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
7816     index c81faea96fba..c7c600c1f63b 100644
7817     --- a/drivers/media/v4l2-core/v4l2-dv-timings.c
7818     +++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
7819     @@ -837,9 +837,9 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
7820     switch (avi->colorimetry) {
7821     case HDMI_COLORIMETRY_EXTENDED:
7822     switch (avi->extended_colorimetry) {
7823     - case HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB:
7824     - c.colorspace = V4L2_COLORSPACE_ADOBERGB;
7825     - c.xfer_func = V4L2_XFER_FUNC_ADOBERGB;
7826     + case HDMI_EXTENDED_COLORIMETRY_OPRGB:
7827     + c.colorspace = V4L2_COLORSPACE_OPRGB;
7828     + c.xfer_func = V4L2_XFER_FUNC_OPRGB;
7829     break;
7830     case HDMI_EXTENDED_COLORIMETRY_BT2020:
7831     c.colorspace = V4L2_COLORSPACE_BT2020;
7832     @@ -908,10 +908,10 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
7833     c.ycbcr_enc = V4L2_YCBCR_ENC_601;
7834     c.xfer_func = V4L2_XFER_FUNC_SRGB;
7835     break;
7836     - case HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601:
7837     - c.colorspace = V4L2_COLORSPACE_ADOBERGB;
7838     + case HDMI_EXTENDED_COLORIMETRY_OPYCC_601:
7839     + c.colorspace = V4L2_COLORSPACE_OPRGB;
7840     c.ycbcr_enc = V4L2_YCBCR_ENC_601;
7841     - c.xfer_func = V4L2_XFER_FUNC_ADOBERGB;
7842     + c.xfer_func = V4L2_XFER_FUNC_OPRGB;
7843     break;
7844     case HDMI_EXTENDED_COLORIMETRY_BT2020:
7845     c.colorspace = V4L2_COLORSPACE_BT2020;
7846     diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
7847     index 29b7164a823b..d28ebe7ecd21 100644
7848     --- a/drivers/mfd/menelaus.c
7849     +++ b/drivers/mfd/menelaus.c
7850     @@ -1094,6 +1094,7 @@ static void menelaus_rtc_alarm_work(struct menelaus_chip *m)
7851     static inline void menelaus_rtc_init(struct menelaus_chip *m)
7852     {
7853     int alarm = (m->client->irq > 0);
7854     + int err;
7855    
7856     /* assume 32KDETEN pin is pulled high */
7857     if (!(menelaus_read_reg(MENELAUS_OSC_CTRL) & 0x80)) {
7858     @@ -1101,6 +1102,12 @@ static inline void menelaus_rtc_init(struct menelaus_chip *m)
7859     return;
7860     }
7861    
7862     + m->rtc = devm_rtc_allocate_device(&m->client->dev);
7863     + if (IS_ERR(m->rtc))
7864     + return;
7865     +
7866     + m->rtc->ops = &menelaus_rtc_ops;
7867     +
7868     /* support RTC alarm; it can issue wakeups */
7869     if (alarm) {
7870     if (menelaus_add_irq_work(MENELAUS_RTCALM_IRQ,
7871     @@ -1125,10 +1132,8 @@ static inline void menelaus_rtc_init(struct menelaus_chip *m)
7872     menelaus_write_reg(MENELAUS_RTC_CTRL, m->rtc_control);
7873     }
7874    
7875     - m->rtc = rtc_device_register(DRIVER_NAME,
7876     - &m->client->dev,
7877     - &menelaus_rtc_ops, THIS_MODULE);
7878     - if (IS_ERR(m->rtc)) {
7879     + err = rtc_register_device(m->rtc);
7880     + if (err) {
7881     if (alarm) {
7882     menelaus_remove_irq_work(MENELAUS_RTCALM_IRQ);
7883     device_init_wakeup(&m->client->dev, 0);
7884     diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
7885     index 120738d6e58b..77ed3967c5b0 100644
7886     --- a/drivers/misc/genwqe/card_base.h
7887     +++ b/drivers/misc/genwqe/card_base.h
7888     @@ -408,7 +408,7 @@ struct genwqe_file {
7889     struct file *filp;
7890    
7891     struct fasync_struct *async_queue;
7892     - struct task_struct *owner;
7893     + struct pid *opener;
7894     struct list_head list; /* entry in list of open files */
7895    
7896     spinlock_t map_lock; /* lock for dma_mappings */
7897     diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
7898     index f453ab82f0d7..8c1b63a4337b 100644
7899     --- a/drivers/misc/genwqe/card_dev.c
7900     +++ b/drivers/misc/genwqe/card_dev.c
7901     @@ -52,7 +52,7 @@ static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
7902     {
7903     unsigned long flags;
7904    
7905     - cfile->owner = current;
7906     + cfile->opener = get_pid(task_tgid(current));
7907     spin_lock_irqsave(&cd->file_lock, flags);
7908     list_add(&cfile->list, &cd->file_list);
7909     spin_unlock_irqrestore(&cd->file_lock, flags);
7910     @@ -65,6 +65,7 @@ static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
7911     spin_lock_irqsave(&cd->file_lock, flags);
7912     list_del(&cfile->list);
7913     spin_unlock_irqrestore(&cd->file_lock, flags);
7914     + put_pid(cfile->opener);
7915    
7916     return 0;
7917     }
7918     @@ -275,7 +276,7 @@ static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig)
7919     return files;
7920     }
7921    
7922     -static int genwqe_force_sig(struct genwqe_dev *cd, int sig)
7923     +static int genwqe_terminate(struct genwqe_dev *cd)
7924     {
7925     unsigned int files = 0;
7926     unsigned long flags;
7927     @@ -283,7 +284,7 @@ static int genwqe_force_sig(struct genwqe_dev *cd, int sig)
7928    
7929     spin_lock_irqsave(&cd->file_lock, flags);
7930     list_for_each_entry(cfile, &cd->file_list, list) {
7931     - force_sig(sig, cfile->owner);
7932     + kill_pid(cfile->opener, SIGKILL, 1);
7933     files++;
7934     }
7935     spin_unlock_irqrestore(&cd->file_lock, flags);
7936     @@ -1352,7 +1353,7 @@ static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
7937     dev_warn(&pci_dev->dev,
7938     "[%s] send SIGKILL and wait ...\n", __func__);
7939    
7940     - rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */
7941     + rc = genwqe_terminate(cd);
7942     if (rc) {
7943     /* Give kill_timout more seconds to end processes */
7944     for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
7945     diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
7946     index 2e30de9c694a..57a6bb1fd3c9 100644
7947     --- a/drivers/misc/ocxl/config.c
7948     +++ b/drivers/misc/ocxl/config.c
7949     @@ -280,7 +280,9 @@ int ocxl_config_check_afu_index(struct pci_dev *dev,
7950     u32 val;
7951     int rc, templ_major, templ_minor, len;
7952    
7953     - pci_write_config_word(dev, fn->dvsec_afu_info_pos, afu_idx);
7954     + pci_write_config_byte(dev,
7955     + fn->dvsec_afu_info_pos + OCXL_DVSEC_AFU_INFO_AFU_IDX,
7956     + afu_idx);
7957     rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_VERSION, &val);
7958     if (rc)
7959     return rc;
7960     diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
7961     index d7eaf1eb11e7..003bfba40758 100644
7962     --- a/drivers/misc/vmw_vmci/vmci_driver.c
7963     +++ b/drivers/misc/vmw_vmci/vmci_driver.c
7964     @@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
7965    
7966     MODULE_AUTHOR("VMware, Inc.");
7967     MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
7968     -MODULE_VERSION("1.1.5.0-k");
7969     +MODULE_VERSION("1.1.6.0-k");
7970     MODULE_LICENSE("GPL v2");
7971     diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
7972     index 1ab6e8737a5f..da1ee2e1ba99 100644
7973     --- a/drivers/misc/vmw_vmci/vmci_resource.c
7974     +++ b/drivers/misc/vmw_vmci/vmci_resource.c
7975     @@ -57,7 +57,8 @@ static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
7976    
7977     if (r->type == type &&
7978     rid == handle.resource &&
7979     - (cid == handle.context || cid == VMCI_INVALID_ID)) {
7980     + (cid == handle.context || cid == VMCI_INVALID_ID ||
7981     + handle.context == VMCI_INVALID_ID)) {
7982     resource = r;
7983     break;
7984     }
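
The vmci_resource_lookup() hunk above widens the context match: before, only the cid side could act as a wildcard, and the added `handle.context == VMCI_INVALID_ID` clause lets the other side be unspecified as well. A tiny standalone predicate with the same shape (the 0xffffffff wildcard value is an assumption used only for this demo):

```c
/* Context match: equal ids, or a wildcard on either side. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_INVALID_ID 0xffffffffu   /* stand-in wildcard value */

static bool context_matches(unsigned int a, unsigned int b)
{
	return a == b || a == DEMO_INVALID_ID || b == DEMO_INVALID_ID;
}

int main(void)
{
	printf("%d\n", context_matches(5, 5));                /* 1: exact match   */
	printf("%d\n", context_matches(5, DEMO_INVALID_ID));  /* 1: wildcard side */
	printf("%d\n", context_matches(5, 7));                /* 0: mismatch      */
	return 0;
}
```
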
7985     diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
7986     index 32321bd596d8..c61109f7b793 100644
7987     --- a/drivers/mmc/host/sdhci-acpi.c
7988     +++ b/drivers/mmc/host/sdhci-acpi.c
7989     @@ -76,6 +76,7 @@ struct sdhci_acpi_slot {
7990     size_t priv_size;
7991     int (*probe_slot)(struct platform_device *, const char *, const char *);
7992     int (*remove_slot)(struct platform_device *);
7993     + int (*free_slot)(struct platform_device *pdev);
7994     int (*setup_host)(struct platform_device *pdev);
7995     };
7996    
7997     @@ -756,6 +757,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
7998     err_cleanup:
7999     sdhci_cleanup_host(c->host);
8000     err_free:
8001     + if (c->slot && c->slot->free_slot)
8002     + c->slot->free_slot(pdev);
8003     +
8004     sdhci_free_host(c->host);
8005     return err;
8006     }
8007     @@ -777,6 +781,10 @@ static int sdhci_acpi_remove(struct platform_device *pdev)
8008    
8009     dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0);
8010     sdhci_remove_host(c->host, dead);
8011     +
8012     + if (c->slot && c->slot->free_slot)
8013     + c->slot->free_slot(pdev);
8014     +
8015     sdhci_free_host(c->host);
8016    
8017     return 0;
8018     diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
8019     index 77e9bc4aaee9..cc3ffeffd7a2 100644
8020     --- a/drivers/mmc/host/sdhci-pci-o2micro.c
8021     +++ b/drivers/mmc/host/sdhci-pci-o2micro.c
8022     @@ -490,6 +490,9 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
8023     pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
8024     break;
8025     case PCI_DEVICE_ID_O2_SEABIRD0:
8026     + if (chip->pdev->revision == 0x01)
8027     + chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
8028     + /* fall through */
8029     case PCI_DEVICE_ID_O2_SEABIRD1:
8030     /* UnLock WP */
8031     ret = pci_read_config_byte(chip->pdev,
8032     diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
8033     index 9d9723693217..2e3a8da3ce72 100644
8034     --- a/drivers/mtd/maps/gpio-addr-flash.c
8035     +++ b/drivers/mtd/maps/gpio-addr-flash.c
8036     @@ -238,7 +238,7 @@ static int gpio_flash_probe(struct platform_device *pdev)
8037     state->map.copy_to = gf_copy_to;
8038     state->map.bankwidth = pdata->width;
8039     state->map.size = state->win_size * (1 << state->gpio_count);
8040     - state->map.virt = ioremap_nocache(memory->start, state->map.size);
8041     + state->map.virt = ioremap_nocache(memory->start, state->win_size);
8042     if (!state->map.virt)
8043     return -ENOMEM;
8044    
8045     diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
8046     index a068b214ebaa..a594fb1e9a99 100644
8047     --- a/drivers/mtd/nand/raw/atmel/nand-controller.c
8048     +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
8049     @@ -2063,6 +2063,10 @@ atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
8050     nand_np = dev->of_node;
8051     nfc_np = of_find_compatible_node(dev->of_node, NULL,
8052     "atmel,sama5d3-nfc");
8053     + if (!nfc_np) {
8054     + dev_err(dev, "Could not find device node for sama5d3-nfc\n");
8055     + return -ENODEV;
8056     + }
8057    
8058     nc->clk = of_clk_get(nfc_np, 0);
8059     if (IS_ERR(nc->clk)) {
8060     diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
8061     index b864b93dd289..2242e999a76b 100644
8062     --- a/drivers/mtd/nand/raw/denali.c
8063     +++ b/drivers/mtd/nand/raw/denali.c
8064     @@ -28,6 +28,7 @@
8065     MODULE_LICENSE("GPL");
8066    
8067     #define DENALI_NAND_NAME "denali-nand"
8068     +#define DENALI_DEFAULT_OOB_SKIP_BYTES 8
8069    
8070     /* for Indexed Addressing */
8071     #define DENALI_INDEXED_CTRL 0x00
8072     @@ -1105,12 +1106,17 @@ static void denali_hw_init(struct denali_nand_info *denali)
8073     denali->revision = swab16(ioread32(denali->reg + REVISION));
8074    
8075     /*
8076     - * tell driver how many bit controller will skip before
8077     - * writing ECC code in OOB, this register may be already
8078     - * set by firmware. So we read this value out.
8079     - * if this value is 0, just let it be.
8080     + * Set how many bytes should be skipped before writing data in OOB.
8081     + * If a non-zero value has already been set (by firmware or something),
8082     + * just use it. Otherwise, set the driver default.
8083     */
8084     denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
8085     + if (!denali->oob_skip_bytes) {
8086     + denali->oob_skip_bytes = DENALI_DEFAULT_OOB_SKIP_BYTES;
8087     + iowrite32(denali->oob_skip_bytes,
8088     + denali->reg + SPARE_AREA_SKIP_BYTES);
8089     + }
8090     +
8091     denali_detect_max_banks(denali);
8092     iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
8093     iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
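
The denali hunk above reads SPARE_AREA_SKIP_BYTES and keeps whatever firmware already programmed, but if the register is still zero it now falls back to an 8-byte driver default and writes that back. The selection policy as a standalone one-liner (the register access itself is deliberately not reproduced here):

```c
/* Prefer the firmware-programmed value; fall back to the driver default
 * only when the register reads as zero.
 */
#include <stdio.h>

#define DEFAULT_OOB_SKIP_BYTES 8   /* stand-in for DENALI_DEFAULT_OOB_SKIP_BYTES */

static unsigned int pick_oob_skip(unsigned int reg_value)
{
	return reg_value ? reg_value : DEFAULT_OOB_SKIP_BYTES;
}

int main(void)
{
	printf("firmware set 2 -> use %u\n", pick_oob_skip(2));
	printf("register is 0  -> use %u\n", pick_oob_skip(0));
	return 0;
}
```
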
8094     diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
8095     index bc2ef5209783..c7573ccdbacd 100644
8096     --- a/drivers/mtd/nand/raw/marvell_nand.c
8097     +++ b/drivers/mtd/nand/raw/marvell_nand.c
8098     @@ -686,7 +686,7 @@ static irqreturn_t marvell_nfc_isr(int irq, void *dev_id)
8099    
8100     marvell_nfc_disable_int(nfc, st & NDCR_ALL_INT);
8101    
8102     - if (!(st & (NDSR_RDDREQ | NDSR_WRDREQ | NDSR_WRCMDREQ)))
8103     + if (st & (NDSR_RDY(0) | NDSR_RDY(1)))
8104     complete(&nfc->complete);
8105    
8106     return IRQ_HANDLED;
8107     diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
8108     index 7d9620c7ff6c..1ff3430f82c8 100644
8109     --- a/drivers/mtd/spi-nor/fsl-quadspi.c
8110     +++ b/drivers/mtd/spi-nor/fsl-quadspi.c
8111     @@ -478,6 +478,7 @@ static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd)
8112     {
8113     switch (cmd) {
8114     case SPINOR_OP_READ_1_1_4:
8115     + case SPINOR_OP_READ_1_1_4_4B:
8116     return SEQID_READ;
8117     case SPINOR_OP_WREN:
8118     return SEQID_WREN;
8119     @@ -543,6 +544,9 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len)
8120    
8121     /* trigger the LUT now */
8122     seqid = fsl_qspi_get_seqid(q, cmd);
8123     + if (seqid < 0)
8124     + return seqid;
8125     +
8126     qspi_writel(q, (seqid << QUADSPI_IPCR_SEQID_SHIFT) | len,
8127     base + QUADSPI_IPCR);
8128    
8129     @@ -671,7 +675,7 @@ static void fsl_qspi_set_map_addr(struct fsl_qspi *q)
8130     * causes the controller to clear the buffer, and use the sequence pointed
8131     * by the QUADSPI_BFGENCR[SEQID] to initiate a read from the flash.
8132     */
8133     -static void fsl_qspi_init_ahb_read(struct fsl_qspi *q)
8134     +static int fsl_qspi_init_ahb_read(struct fsl_qspi *q)
8135     {
8136     void __iomem *base = q->iobase;
8137     int seqid;
8138     @@ -696,8 +700,13 @@ static void fsl_qspi_init_ahb_read(struct fsl_qspi *q)
8139    
8140     /* Set the default lut sequence for AHB Read. */
8141     seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode);
8142     + if (seqid < 0)
8143     + return seqid;
8144     +
8145     qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT,
8146     q->iobase + QUADSPI_BFGENCR);
8147     +
8148     + return 0;
8149     }
8150    
8151     /* This function was used to prepare and enable QSPI clock */
8152     @@ -805,9 +814,7 @@ static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
8153     fsl_qspi_init_lut(q);
8154    
8155     /* Init for AHB read */
8156     - fsl_qspi_init_ahb_read(q);
8157     -
8158     - return 0;
8159     + return fsl_qspi_init_ahb_read(q);
8160     }
8161    
8162     static const struct of_device_id fsl_qspi_dt_ids[] = {
8163     diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c
8164     index c0976f2e3dd1..872b40922608 100644
8165     --- a/drivers/mtd/spi-nor/intel-spi-pci.c
8166     +++ b/drivers/mtd/spi-nor/intel-spi-pci.c
8167     @@ -65,6 +65,7 @@ static void intel_spi_pci_remove(struct pci_dev *pdev)
8168     static const struct pci_device_id intel_spi_pci_ids[] = {
8169     { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
8170     { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
8171     + { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
8172     { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
8173     { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
8174     { },
8175     diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c
8176     index 46af8052e535..152a65d46e0b 100644
8177     --- a/drivers/net/dsa/mv88e6xxx/phy.c
8178     +++ b/drivers/net/dsa/mv88e6xxx/phy.c
8179     @@ -110,6 +110,9 @@ int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy,
8180     err = mv88e6xxx_phy_page_get(chip, phy, page);
8181     if (!err) {
8182     err = mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page);
8183     + if (!err)
8184     + err = mv88e6xxx_phy_write(chip, phy, reg, val);
8185     +
8186     mv88e6xxx_phy_page_put(chip, phy);
8187     }
8188    
8189     diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
8190     index 34af5f1569c8..de0e24d912fe 100644
8191     --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
8192     +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
8193     @@ -342,7 +342,7 @@ static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv)
8194     if (!compat)
8195     return NULL;
8196    
8197     - priv->mdio_dn = of_find_compatible_node(dn, NULL, compat);
8198     + priv->mdio_dn = of_get_compatible_child(dn, compat);
8199     kfree(compat);
8200     if (!priv->mdio_dn) {
8201     dev_err(kdev, "unable to find MDIO bus node\n");
8202     diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
8203     index 955c4ab18b03..b7b2f8254ce1 100644
8204     --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
8205     +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
8206     @@ -1915,6 +1915,7 @@ static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
8207     bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
8208     {
8209     struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
8210     + struct hns3_nic_priv *priv = netdev_priv(netdev);
8211     struct netdev_queue *dev_queue;
8212     int bytes, pkts;
8213     int head;
8214     @@ -1961,7 +1962,8 @@ bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
8215     * sees the new next_to_clean.
8216     */
8217     smp_mb();
8218     - if (netif_tx_queue_stopped(dev_queue)) {
8219     + if (netif_tx_queue_stopped(dev_queue) &&
8220     + !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
8221     netif_tx_wake_queue(dev_queue);
8222     ring->stats.restart_queue++;
8223     }
8224     diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
8225     index f70ee6910ee2..9684ad015c42 100644
8226     --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
8227     +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
8228     @@ -309,7 +309,7 @@ static void hns3_self_test(struct net_device *ndev,
8229     h->flags & HNAE3_SUPPORT_SERDES_LOOPBACK;
8230    
8231     if (if_running)
8232     - dev_close(ndev);
8233     + ndev->netdev_ops->ndo_stop(ndev);
8234    
8235     #if IS_ENABLED(CONFIG_VLAN_8021Q)
8236     /* Disable the vlan filter for selftest does not support it */
8237     @@ -347,7 +347,7 @@ static void hns3_self_test(struct net_device *ndev,
8238     #endif
8239    
8240     if (if_running)
8241     - dev_open(ndev);
8242     + ndev->netdev_ops->ndo_open(ndev);
8243     }
8244    
8245     static int hns3_get_sset_count(struct net_device *netdev, int stringset)
8246     diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
8247     index f08ebb7caaaf..92f19384e258 100644
8248     --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
8249     +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
8250     @@ -73,6 +73,7 @@ static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
8251     static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
8252     u8 *tc, bool *changed)
8253     {
8254     + bool has_ets_tc = false;
8255     u32 total_ets_bw = 0;
8256     u8 max_tc = 0;
8257     u8 i;
8258     @@ -100,13 +101,14 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
8259     *changed = true;
8260    
8261     total_ets_bw += ets->tc_tx_bw[i];
8262     - break;
8263     + has_ets_tc = true;
8264     + break;
8265     default:
8266     return -EINVAL;
8267     }
8268     }
8269    
8270     - if (total_ets_bw != BW_PERCENT)
8271     + if (has_ets_tc && total_ets_bw != BW_PERCENT)
8272     return -EINVAL;
8273    
8274     *tc = max_tc + 1;
8275     diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8276     index 8577dfc799ad..db763450e5e3 100644
8277     --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8278     +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8279     @@ -1657,11 +1657,13 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
8280     static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
8281     struct hclge_pkt_buf_alloc *buf_alloc)
8282     {
8283     - u32 rx_all = hdev->pkt_buf_size;
8284     +#define HCLGE_BUF_SIZE_UNIT 128
8285     + u32 rx_all = hdev->pkt_buf_size, aligned_mps;
8286     int no_pfc_priv_num, pfc_priv_num;
8287     struct hclge_priv_buf *priv;
8288     int i;
8289    
8290     + aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
8291     rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
8292    
8293     /* When DCB is not supported, rx private
8294     @@ -1680,13 +1682,13 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
8295     if (hdev->hw_tc_map & BIT(i)) {
8296     priv->enable = 1;
8297     if (hdev->tm_info.hw_pfc_map & BIT(i)) {
8298     - priv->wl.low = hdev->mps;
8299     - priv->wl.high = priv->wl.low + hdev->mps;
8300     + priv->wl.low = aligned_mps;
8301     + priv->wl.high = priv->wl.low + aligned_mps;
8302     priv->buf_size = priv->wl.high +
8303     HCLGE_DEFAULT_DV;
8304     } else {
8305     priv->wl.low = 0;
8306     - priv->wl.high = 2 * hdev->mps;
8307     + priv->wl.high = 2 * aligned_mps;
8308     priv->buf_size = priv->wl.high;
8309     }
8310     } else {
8311     @@ -1718,11 +1720,11 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
8312    
8313     if (hdev->tm_info.hw_pfc_map & BIT(i)) {
8314     priv->wl.low = 128;
8315     - priv->wl.high = priv->wl.low + hdev->mps;
8316     + priv->wl.high = priv->wl.low + aligned_mps;
8317     priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
8318     } else {
8319     priv->wl.low = 0;
8320     - priv->wl.high = hdev->mps;
8321     + priv->wl.high = aligned_mps;
8322     priv->buf_size = priv->wl.high;
8323     }
8324     }
8325     @@ -2360,6 +2362,9 @@ static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
8326     int mac_state;
8327     int link_stat;
8328    
8329     + if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
8330     + return 0;
8331     +
8332     mac_state = hclge_get_mac_link_status(hdev);
8333    
8334     if (hdev->hw.mac.phydev) {
8335     @@ -3809,6 +3814,8 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
8336     struct hclge_dev *hdev = vport->back;
8337     int i;
8338    
8339     + set_bit(HCLGE_STATE_DOWN, &hdev->state);
8340     +
8341     del_timer_sync(&hdev->service_timer);
8342     cancel_work_sync(&hdev->service_task);
8343     clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
8344     @@ -4686,9 +4693,17 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
8345     "Add vf vlan filter fail, ret =%d.\n",
8346     req0->resp_code);
8347     } else {
8348     +#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
8349     if (!req0->resp_code)
8350     return 0;
8351    
8352     + if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
8353     + dev_warn(&hdev->pdev->dev,
8354     + "vlan %d filter is not in vf vlan table\n",
8355     + vlan);
8356     + return 0;
8357     + }
8358     +
8359     dev_err(&hdev->pdev->dev,
8360     "Kill vf vlan filter fail, ret =%d.\n",
8361     req0->resp_code);
8362     @@ -4732,6 +4747,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8363     u16 vport_idx, vport_num = 0;
8364     int ret;
8365    
8366     + if (is_kill && !vlan_id)
8367     + return 0;
8368     +
8369     ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8370     0, proto);
8371     if (ret) {
8372     diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
8373     index 9c0091f2addf..320043e87fc6 100644
8374     --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
8375     +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
8376     @@ -299,6 +299,9 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
8377    
8378     client = handle->client;
8379    
8380     + link_state =
8381     + test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
8382     +
8383     if (link_state != hdev->hw.mac.link) {
8384     client->ops->link_status_change(handle, !!link_state);
8385     hdev->hw.mac.link = link_state;
8386     @@ -1448,6 +1451,8 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
8387     struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
8388     int i, queue_id;
8389    
8390     + set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
8391     +
8392     for (i = 0; i < hdev->num_tqps; i++) {
8393     /* Ring disable */
8394     queue_id = hclgevf_get_queue_id(handle->kinfo.tqp[i]);
8395     diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
8396     index 868f4a1d0f72..67591722c625 100644
8397     --- a/drivers/net/ethernet/intel/ice/ice.h
8398     +++ b/drivers/net/ethernet/intel/ice/ice.h
8399     @@ -39,9 +39,9 @@
8400     extern const char ice_drv_ver[];
8401     #define ICE_BAR0 0
8402     #define ICE_DFLT_NUM_DESC 128
8403     -#define ICE_MIN_NUM_DESC 8
8404     -#define ICE_MAX_NUM_DESC 8160
8405     #define ICE_REQ_DESC_MULTIPLE 32
8406     +#define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE
8407     +#define ICE_MAX_NUM_DESC 8160
8408     #define ICE_DFLT_TRAFFIC_CLASS BIT(0)
8409     #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
8410     #define ICE_ETHTOOL_FWVER_LEN 32
8411     diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
8412     index 62be72fdc8f3..e783976c401d 100644
8413     --- a/drivers/net/ethernet/intel/ice/ice_controlq.c
8414     +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
8415     @@ -518,22 +518,31 @@ shutdown_sq_out:
8416    
8417     /**
8418     * ice_aq_ver_check - Check the reported AQ API version.
8419     - * @fw_branch: The "branch" of FW, typically describes the device type
8420     - * @fw_major: The major version of the FW API
8421     - * @fw_minor: The minor version increment of the FW API
8422     + * @hw: pointer to the hardware structure
8423     *
8424     * Checks if the driver should load on a given AQ API version.
8425     *
8426     * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
8427     */
8428     -static bool ice_aq_ver_check(u8 fw_branch, u8 fw_major, u8 fw_minor)
8429     +static bool ice_aq_ver_check(struct ice_hw *hw)
8430     {
8431     - if (fw_branch != EXP_FW_API_VER_BRANCH)
8432     - return false;
8433     - if (fw_major != EXP_FW_API_VER_MAJOR)
8434     - return false;
8435     - if (fw_minor != EXP_FW_API_VER_MINOR)
8436     + if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
8437     + /* Major API version is newer than expected, don't load */
8438     + dev_warn(ice_hw_to_dev(hw),
8439     + "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
8440     return false;
8441     + } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
8442     + if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
8443     + dev_info(ice_hw_to_dev(hw),
8444     + "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
8445     + else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
8446     + dev_info(ice_hw_to_dev(hw),
8447     + "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
8448     + } else {
8449     + /* Major API version is older than expected, log a warning */
8450     + dev_info(ice_hw_to_dev(hw),
8451     + "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
8452     + }
8453     return true;
8454     }
8455    
8456     @@ -588,8 +597,7 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
8457     if (status)
8458     goto init_ctrlq_free_rq;
8459    
8460     - if (!ice_aq_ver_check(hw->api_branch, hw->api_maj_ver,
8461     - hw->api_min_ver)) {
8462     + if (!ice_aq_ver_check(hw)) {
8463     status = ICE_ERR_FW_API_VER;
8464     goto init_ctrlq_free_rq;
8465     }
8466     diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
8467     index c71a9b528d6d..9d6754f65a1a 100644
8468     --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
8469     +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
8470     @@ -478,9 +478,11 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
8471     ring->tx_max_pending = ICE_MAX_NUM_DESC;
8472     ring->rx_pending = vsi->rx_rings[0]->count;
8473     ring->tx_pending = vsi->tx_rings[0]->count;
8474     - ring->rx_mini_pending = ICE_MIN_NUM_DESC;
8475     +
8476     + /* Rx mini and jumbo rings are not supported */
8477     ring->rx_mini_max_pending = 0;
8478     ring->rx_jumbo_max_pending = 0;
8479     + ring->rx_mini_pending = 0;
8480     ring->rx_jumbo_pending = 0;
8481     }
8482    
8483     @@ -498,14 +500,23 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
8484     ring->tx_pending < ICE_MIN_NUM_DESC ||
8485     ring->rx_pending > ICE_MAX_NUM_DESC ||
8486     ring->rx_pending < ICE_MIN_NUM_DESC) {
8487     - netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
8488     + netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
8489     ring->tx_pending, ring->rx_pending,
8490     - ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC);
8491     + ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC,
8492     + ICE_REQ_DESC_MULTIPLE);
8493     return -EINVAL;
8494     }
8495    
8496     new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
8497     + if (new_tx_cnt != ring->tx_pending)
8498     + netdev_info(netdev,
8499     + "Requested Tx descriptor count rounded up to %d\n",
8500     + new_tx_cnt);
8501     new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
8502     + if (new_rx_cnt != ring->rx_pending)
8503     + netdev_info(netdev,
8504     + "Requested Rx descriptor count rounded up to %d\n",
8505     + new_rx_cnt);
8506    
8507     /* if nothing to do return success */
8508     if (new_tx_cnt == vsi->tx_rings[0]->count &&
8509     diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
8510     index da4322e4daed..add124e0381d 100644
8511     --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
8512     +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
8513     @@ -676,6 +676,9 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
8514     } else {
8515     struct tx_sa tsa;
8516    
8517     + if (adapter->num_vfs)
8518     + return -EOPNOTSUPP;
8519     +
8520     /* find the first unused index */
8521     ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
8522     if (ret < 0) {
8523     diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
8524     index 5a228582423b..4093a9c52c18 100644
8525     --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
8526     +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
8527     @@ -3849,6 +3849,10 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
8528     skb_checksum_help(skb);
8529     goto no_csum;
8530     }
8531     +
8532     + if (first->protocol == htons(ETH_P_IP))
8533     + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
8534     +
8535     /* update TX checksum flag */
8536     first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
8537     vlan_macip_lens = skb_checksum_start_offset(skb) -
8538     diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
8539     index db463e20a876..e9a4179e7e48 100644
8540     --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
8541     +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
8542     @@ -96,6 +96,7 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
8543     {
8544     struct nfp_pf *pf = devlink_priv(devlink);
8545     struct nfp_eth_table_port eth_port;
8546     + unsigned int lanes;
8547     int ret;
8548    
8549     if (count < 2)
8550     @@ -114,8 +115,12 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
8551     goto out;
8552     }
8553    
8554     - ret = nfp_devlink_set_lanes(pf, eth_port.index,
8555     - eth_port.port_lanes / count);
8556     + /* Special case the 100G CXP -> 2x40G split */
8557     + lanes = eth_port.port_lanes / count;
8558     + if (eth_port.lanes == 10 && count == 2)
8559     + lanes = 8 / count;
8560     +
8561     + ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
8562     out:
8563     mutex_unlock(&pf->lock);
8564    
8565     @@ -128,6 +133,7 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
8566     {
8567     struct nfp_pf *pf = devlink_priv(devlink);
8568     struct nfp_eth_table_port eth_port;
8569     + unsigned int lanes;
8570     int ret;
8571    
8572     mutex_lock(&pf->lock);
8573     @@ -143,7 +149,12 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
8574     goto out;
8575     }
8576    
8577     - ret = nfp_devlink_set_lanes(pf, eth_port.index, eth_port.port_lanes);
8578     + /* Special case the 100G CXP -> 2x40G unsplit */
8579     + lanes = eth_port.port_lanes;
8580     + if (eth_port.port_lanes == 8)
8581     + lanes = 10;
8582     +
8583     + ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
8584     out:
8585     mutex_unlock(&pf->lock);
8586    
8587     diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
8588     index 4289ccb26e4e..d2caeb9edc04 100644
8589     --- a/drivers/net/ethernet/socionext/netsec.c
8590     +++ b/drivers/net/ethernet/socionext/netsec.c
8591     @@ -940,6 +940,9 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
8592     dring->head = 0;
8593     dring->tail = 0;
8594     dring->pkt_cnt = 0;
8595     +
8596     + if (id == NETSEC_RING_TX)
8597     + netdev_reset_queue(priv->ndev);
8598     }
8599    
8600     static void netsec_free_dring(struct netsec_priv *priv, int id)
8601     diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
8602     index f9a61f90cfbc..0f660af01a4b 100644
8603     --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
8604     +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
8605     @@ -714,8 +714,9 @@ static int get_ephy_nodes(struct stmmac_priv *priv)
8606     return -ENODEV;
8607     }
8608    
8609     - mdio_internal = of_find_compatible_node(mdio_mux, NULL,
8610     + mdio_internal = of_get_compatible_child(mdio_mux,
8611     "allwinner,sun8i-h3-mdio-internal");
8612     + of_node_put(mdio_mux);
8613     if (!mdio_internal) {
8614     dev_err(priv->device, "Cannot get internal_mdio node\n");
8615     return -ENODEV;
8616     @@ -729,13 +730,20 @@ static int get_ephy_nodes(struct stmmac_priv *priv)
8617     gmac->rst_ephy = of_reset_control_get_exclusive(iphynode, NULL);
8618     if (IS_ERR(gmac->rst_ephy)) {
8619     ret = PTR_ERR(gmac->rst_ephy);
8620     - if (ret == -EPROBE_DEFER)
8621     + if (ret == -EPROBE_DEFER) {
8622     + of_node_put(iphynode);
8623     + of_node_put(mdio_internal);
8624     return ret;
8625     + }
8626     continue;
8627     }
8628     dev_info(priv->device, "Found internal PHY node\n");
8629     + of_node_put(iphynode);
8630     + of_node_put(mdio_internal);
8631     return 0;
8632     }
8633     +
8634     + of_node_put(mdio_internal);
8635     return -ENODEV;
8636     }
8637    
8638     diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
8639     index 3af6d8d15233..1c37a821895b 100644
8640     --- a/drivers/net/hyperv/netvsc_drv.c
8641     +++ b/drivers/net/hyperv/netvsc_drv.c
8642     @@ -2022,14 +2022,15 @@ static void netvsc_vf_setup(struct work_struct *w)
8643     rtnl_unlock();
8644     }
8645    
8646     -/* Find netvsc by VMBus serial number.
8647     - * The PCI hyperv controller records the serial number as the slot.
8648     +/* Find netvsc by VF serial number.
8649     + * The PCI hyperv controller records the serial number as the slot kobj name.
8650     */
8651     static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
8652     {
8653     struct device *parent = vf_netdev->dev.parent;
8654     struct net_device_context *ndev_ctx;
8655     struct pci_dev *pdev;
8656     + u32 serial;
8657    
8658     if (!parent || !dev_is_pci(parent))
8659     return NULL; /* not a PCI device */
8660     @@ -2040,16 +2041,22 @@ static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
8661     return NULL;
8662     }
8663    
8664     + if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
8665     + netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
8666     + pci_slot_name(pdev->slot));
8667     + return NULL;
8668     + }
8669     +
8670     list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
8671     if (!ndev_ctx->vf_alloc)
8672     continue;
8673    
8674     - if (ndev_ctx->vf_serial == pdev->slot->number)
8675     + if (ndev_ctx->vf_serial == serial)
8676     return hv_get_drvdata(ndev_ctx->device_ctx);
8677     }
8678    
8679     netdev_notice(vf_netdev,
8680     - "no netdev found for slot %u\n", pdev->slot->number);
8681     + "no netdev found for vf serial:%u\n", serial);
8682     return NULL;
8683     }
8684    
8685     diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
8686     index 30612497643c..d192936b76cf 100644
8687     --- a/drivers/net/loopback.c
8688     +++ b/drivers/net/loopback.c
8689     @@ -75,6 +75,10 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
8690     int len;
8691    
8692     skb_tx_timestamp(skb);
8693     +
8694     + /* do not fool net_timestamp_check() with various clock bases */
8695     + skb->tstamp = 0;
8696     +
8697     skb_orphan(skb);
8698    
8699     /* Before queueing this packet to netif_rx(),
8700     diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
8701     index 7ae1856d1f18..5a749dc25bec 100644
8702     --- a/drivers/net/net_failover.c
8703     +++ b/drivers/net/net_failover.c
8704     @@ -603,6 +603,9 @@ static int net_failover_slave_unregister(struct net_device *slave_dev,
8705     primary_dev = rtnl_dereference(nfo_info->primary_dev);
8706     standby_dev = rtnl_dereference(nfo_info->standby_dev);
8707    
8708     + if (WARN_ON_ONCE(slave_dev != primary_dev && slave_dev != standby_dev))
8709     + return -ENODEV;
8710     +
8711     vlan_vids_del_by_dev(slave_dev, failover_dev);
8712     dev_uc_unsync(slave_dev, failover_dev);
8713     dev_mc_unsync(slave_dev, failover_dev);
8714     diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
8715     index 7abca86c3aa9..70f3f90c2ed6 100644
8716     --- a/drivers/net/phy/phylink.c
8717     +++ b/drivers/net/phy/phylink.c
8718     @@ -907,6 +907,9 @@ void phylink_start(struct phylink *pl)
8719     phylink_an_mode_str(pl->link_an_mode),
8720     phy_modes(pl->link_config.interface));
8721    
8722     + /* Always set the carrier off */
8723     + netif_carrier_off(pl->netdev);
8724     +
8725     /* Apply the link configuration to the MAC when starting. This allows
8726     * a fixed-link to start with the correct parameters, and also
8727     * ensures that we set the appropriate advertisement for Serdes links.
8728     diff --git a/drivers/net/tun.c b/drivers/net/tun.c
8729     index 50e9cc19023a..c52207beef88 100644
8730     --- a/drivers/net/tun.c
8731     +++ b/drivers/net/tun.c
8732     @@ -2264,6 +2264,8 @@ static void tun_setup(struct net_device *dev)
8733     static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
8734     struct netlink_ext_ack *extack)
8735     {
8736     + if (!data)
8737     + return 0;
8738     return -EINVAL;
8739     }
8740    
8741     diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
8742     index fd612d2905b0..9f31b9a10850 100644
8743     --- a/drivers/net/wireless/ath/ath10k/wmi.c
8744     +++ b/drivers/net/wireless/ath/ath10k/wmi.c
8745     @@ -1869,6 +1869,12 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
8746     if (ret)
8747     dev_kfree_skb_any(skb);
8748    
8749     + if (ret == -EAGAIN) {
8750     + ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n",
8751     + cmd_id);
8752     + queue_work(ar->workqueue, &ar->restart_work);
8753     + }
8754     +
8755     return ret;
8756     }
8757    
8758     @@ -2336,7 +2342,12 @@ static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
8759     dma_unmap_single(ar->dev, pkt_addr->paddr,
8760     msdu->len, DMA_FROM_DEVICE);
8761     info = IEEE80211_SKB_CB(msdu);
8762     - info->flags |= status;
8763     +
8764     + if (status)
8765     + info->flags &= ~IEEE80211_TX_STAT_ACK;
8766     + else
8767     + info->flags |= IEEE80211_TX_STAT_ACK;
8768     +
8769     ieee80211_tx_status_irqsafe(ar->hw, msdu);
8770    
8771     ret = 0;
8772     diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
8773     index bca61cb44c37..3e7fc2983cbb 100644
8774     --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
8775     +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
8776     @@ -279,9 +279,6 @@ static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
8777     u16 buff_id;
8778    
8779     *d = *_d;
8780     - pa = wil_rx_desc_get_addr_edma(&d->dma);
8781     - dmalen = le16_to_cpu(d->dma.length);
8782     - dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
8783    
8784     /* Extract the SKB from the rx_buff management array */
8785     buff_id = __le16_to_cpu(d->mac.buff_id);
8786     @@ -291,10 +288,15 @@ static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
8787     }
8788     skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
8789     wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
8790     - if (unlikely(!skb))
8791     + if (unlikely(!skb)) {
8792     wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
8793     - else
8794     + } else {
8795     + pa = wil_rx_desc_get_addr_edma(&d->dma);
8796     + dmalen = le16_to_cpu(d->dma.length);
8797     + dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
8798     +
8799     kfree_skb(skb);
8800     + }
8801    
8802     /* Move the buffer from the active to the free list */
8803     list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
8804     @@ -906,6 +908,9 @@ again:
8805     wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
8806     if (!skb) {
8807     wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
8808     + /* Move the buffer from the active list to the free list */
8809     + list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
8810     + &wil->rx_buff_mgmt.free);
8811     goto again;
8812     }
8813    
8814     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
8815     index d8b79cb72b58..e7584b842dce 100644
8816     --- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
8817     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
8818     @@ -77,6 +77,8 @@ static u16 d11ac_bw(enum brcmu_chan_bw bw)
8819     return BRCMU_CHSPEC_D11AC_BW_40;
8820     case BRCMU_CHAN_BW_80:
8821     return BRCMU_CHSPEC_D11AC_BW_80;
8822     + case BRCMU_CHAN_BW_160:
8823     + return BRCMU_CHSPEC_D11AC_BW_160;
8824     default:
8825     WARN_ON(1);
8826     }
8827     @@ -190,8 +192,38 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
8828     break;
8829     }
8830     break;
8831     - case BRCMU_CHSPEC_D11AC_BW_8080:
8832     case BRCMU_CHSPEC_D11AC_BW_160:
8833     + switch (ch->sb) {
8834     + case BRCMU_CHAN_SB_LLL:
8835     + ch->control_ch_num -= CH_70MHZ_APART;
8836     + break;
8837     + case BRCMU_CHAN_SB_LLU:
8838     + ch->control_ch_num -= CH_50MHZ_APART;
8839     + break;
8840     + case BRCMU_CHAN_SB_LUL:
8841     + ch->control_ch_num -= CH_30MHZ_APART;
8842     + break;
8843     + case BRCMU_CHAN_SB_LUU:
8844     + ch->control_ch_num -= CH_10MHZ_APART;
8845     + break;
8846     + case BRCMU_CHAN_SB_ULL:
8847     + ch->control_ch_num += CH_10MHZ_APART;
8848     + break;
8849     + case BRCMU_CHAN_SB_ULU:
8850     + ch->control_ch_num += CH_30MHZ_APART;
8851     + break;
8852     + case BRCMU_CHAN_SB_UUL:
8853     + ch->control_ch_num += CH_50MHZ_APART;
8854     + break;
8855     + case BRCMU_CHAN_SB_UUU:
8856     + ch->control_ch_num += CH_70MHZ_APART;
8857     + break;
8858     + default:
8859     + WARN_ON_ONCE(1);
8860     + break;
8861     + }
8862     + break;
8863     + case BRCMU_CHSPEC_D11AC_BW_8080:
8864     default:
8865     WARN_ON_ONCE(1);
8866     break;
8867     diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
8868     index 7b9a77981df1..75b2a0438cfa 100644
8869     --- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
8870     +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h
8871     @@ -29,6 +29,8 @@
8872     #define CH_UPPER_SB 0x01
8873     #define CH_LOWER_SB 0x02
8874     #define CH_EWA_VALID 0x04
8875     +#define CH_70MHZ_APART 14
8876     +#define CH_50MHZ_APART 10
8877     #define CH_30MHZ_APART 6
8878     #define CH_20MHZ_APART 4
8879     #define CH_10MHZ_APART 2
8880     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
8881     index 6bb1a99a197a..48a3611d6a31 100644
8882     --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
8883     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
8884     @@ -704,8 +704,12 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
8885     enabled = !!(wifi_pkg->package.elements[1].integer.value);
8886     n_profiles = wifi_pkg->package.elements[2].integer.value;
8887    
8888     - /* in case of BIOS bug */
8889     - if (n_profiles <= 0) {
8890     + /*
8891     + * Check the validity of n_profiles. The EWRD profiles start
8892     + * from index 1, so the maximum value allowed here is
8893     + * ACPI_SAR_PROFILES_NUM - 1.
8894     + */
8895     + if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
8896     ret = -EINVAL;
8897     goto out_free;
8898     }
8899     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
8900     index b15b0d84bb7e..155cc2ac0120 100644
8901     --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
8902     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
8903     @@ -1233,12 +1233,15 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
8904     iwl_mvm_del_aux_sta(mvm);
8905    
8906     /*
8907     - * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
8908     - * won't be called in this case).
8909     + * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the
8910     + * hw (as restart_complete() won't be called in this case) and mac80211
8911     + * won't execute the restart.
8912     * But make sure to cleanup interfaces that have gone down before/during
8913     * HW restart was requested.
8914     */
8915     - if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
8916     + if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
8917     + test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
8918     + &mvm->status))
8919     ieee80211_iterate_interfaces(mvm->hw, 0,
8920     iwl_mvm_cleanup_iterator, mvm);
8921    
8922     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
8923     index 30cfd7d50bc9..f2830b5693d2 100644
8924     --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
8925     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
8926     @@ -1239,7 +1239,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
8927     !(info->flags & IEEE80211_TX_STAT_AMPDU))
8928     return;
8929    
8930     - rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate);
8931     + if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band,
8932     + &tx_resp_rate)) {
8933     + WARN_ON_ONCE(1);
8934     + return;
8935     + }
8936    
8937     #ifdef CONFIG_MAC80211_DEBUGFS
8938     /* Disable last tx check if we are debugging with fixed rate but
8939     @@ -1290,7 +1294,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
8940     */
8941     table = &lq_sta->lq;
8942     lq_hwrate = le32_to_cpu(table->rs_table[0]);
8943     - rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
8944     + if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) {
8945     + WARN_ON_ONCE(1);
8946     + return;
8947     + }
8948    
8949     /* Here we actually compare this rate to the latest LQ command */
8950     if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
8951     @@ -1392,8 +1399,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
8952     /* Collect data for each rate used during failed TX attempts */
8953     for (i = 0; i <= retries; ++i) {
8954     lq_hwrate = le32_to_cpu(table->rs_table[i]);
8955     - rs_rate_from_ucode_rate(lq_hwrate, info->band,
8956     - &lq_rate);
8957     + if (rs_rate_from_ucode_rate(lq_hwrate, info->band,
8958     + &lq_rate)) {
8959     + WARN_ON_ONCE(1);
8960     + return;
8961     + }
8962     +
8963     /*
8964     * Only collect stats if retried rate is in the same RS
8965     * table as active/search.
8966     @@ -3262,7 +3273,10 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
8967     for (i = 0; i < num_rates; i++)
8968     lq_cmd->rs_table[i] = ucode_rate_le32;
8969    
8970     - rs_rate_from_ucode_rate(ucode_rate, band, &rate);
8971     + if (rs_rate_from_ucode_rate(ucode_rate, band, &rate)) {
8972     + WARN_ON_ONCE(1);
8973     + return;
8974     + }
8975    
8976     if (is_mimo(&rate))
8977     lq_cmd->mimo_delim = num_rates - 1;
8978     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
8979     index ff193dca2020..2d21f0a1fa00 100644
8980     --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
8981     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
8982     @@ -1405,6 +1405,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
8983     while (!skb_queue_empty(&skbs)) {
8984     struct sk_buff *skb = __skb_dequeue(&skbs);
8985     struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
8986     + struct ieee80211_hdr *hdr = (void *)skb->data;
8987     bool flushed = false;
8988    
8989     skb_freed++;
8990     @@ -1449,11 +1450,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
8991     info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
8992     info->flags &= ~IEEE80211_TX_CTL_AMPDU;
8993    
8994     - /* W/A FW bug: seq_ctl is wrong when the status isn't success */
8995     - if (status != TX_STATUS_SUCCESS) {
8996     - struct ieee80211_hdr *hdr = (void *)skb->data;
8997     + /* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */
8998     + if (ieee80211_is_back_req(hdr->frame_control))
8999     + seq_ctl = 0;
9000     + else if (status != TX_STATUS_SUCCESS)
9001     seq_ctl = le16_to_cpu(hdr->seq_ctrl);
9002     - }
9003    
9004     if (unlikely(!seq_ctl)) {
9005     struct ieee80211_hdr *hdr = (void *)skb->data;
9006     diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
9007     index d017aa2a0a8b..d4a31e014c82 100644
9008     --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
9009     +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
9010     @@ -1144,6 +1144,14 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
9011     kfree(trans_pcie->rxq);
9012     }
9013    
9014     +static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
9015     + struct iwl_rb_allocator *rba)
9016     +{
9017     + spin_lock(&rba->lock);
9018     + list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
9019     + spin_unlock(&rba->lock);
9020     +}
9021     +
9022     /*
9023     * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
9024     *
9025     @@ -1175,9 +1183,7 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
9026     if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
9027     /* Move the 2 RBDs to the allocator ownership.
9028     Allocator has another 6 from pool for the request completion*/
9029     - spin_lock(&rba->lock);
9030     - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
9031     - spin_unlock(&rba->lock);
9032     + iwl_pcie_rx_move_to_allocator(rxq, rba);
9033    
9034     atomic_inc(&rba->req_pending);
9035     queue_work(rba->alloc_wq, &rba->rx_alloc);
9036     @@ -1396,10 +1402,18 @@ restart:
9037     IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
9038    
9039     while (i != r) {
9040     + struct iwl_rb_allocator *rba = &trans_pcie->rba;
9041     struct iwl_rx_mem_buffer *rxb;
9042     -
9043     - if (unlikely(rxq->used_count == rxq->queue_size / 2))
9044     + /* number of RBDs still waiting for page allocation */
9045     + u32 rb_pending_alloc =
9046     + atomic_read(&trans_pcie->rba.req_pending) *
9047     + RX_CLAIM_REQ_ALLOC;
9048     +
9049     + if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
9050     + !emergency)) {
9051     + iwl_pcie_rx_move_to_allocator(rxq, rba);
9052     emergency = true;
9053     + }
9054    
9055     rxb = iwl_pcie_get_rxb(trans, rxq, i);
9056     if (!rxb)
9057     @@ -1421,17 +1435,13 @@ restart:
9058     iwl_pcie_rx_allocator_get(trans, rxq);
9059    
9060     if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
9061     - struct iwl_rb_allocator *rba = &trans_pcie->rba;
9062     -
9063     /* Add the remaining empty RBDs for allocator use */
9064     - spin_lock(&rba->lock);
9065     - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
9066     - spin_unlock(&rba->lock);
9067     + iwl_pcie_rx_move_to_allocator(rxq, rba);
9068     } else if (emergency) {
9069     count++;
9070     if (count == 8) {
9071     count = 0;
9072     - if (rxq->used_count < rxq->queue_size / 3)
9073     + if (rb_pending_alloc < rxq->queue_size / 3)
9074     emergency = false;
9075    
9076     rxq->read = i;
9077     diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
9078     index c67a8e7be310..3dbfce972c56 100644
9079     --- a/drivers/net/wireless/marvell/libertas/if_usb.c
9080     +++ b/drivers/net/wireless/marvell/libertas/if_usb.c
9081     @@ -456,8 +456,6 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
9082     MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn,
9083     cardp);
9084    
9085     - cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
9086     -
9087     lbs_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb);
9088     if ((ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC))) {
9089     lbs_deb_usbd(&cardp->udev->dev, "Submit Rx URB failed: %d\n", ret);
9090     diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
9091     index e92fc5001171..789337ea676a 100644
9092     --- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
9093     +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
9094     @@ -605,9 +605,10 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
9095     {
9096     unsigned long flags;
9097    
9098     - if (recvlength > LBS_CMD_BUFFER_SIZE) {
9099     + if (recvlength < MESSAGE_HEADER_LEN ||
9100     + recvlength > LBS_CMD_BUFFER_SIZE) {
9101     lbtf_deb_usbd(&cardp->udev->dev,
9102     - "The receive buffer is too large\n");
9103     + "The receive buffer is invalid: %d\n", recvlength);
9104     kfree_skb(skb);
9105     return;
9106     }
9107     diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
9108     index 23cf437d14f9..1a49d1be042d 100644
9109     --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
9110     +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
9111     @@ -128,8 +128,7 @@ __mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb)
9112     if (skb) {
9113     ret = mt76_write_beacon(dev, beacon_addr, skb);
9114     if (!ret)
9115     - dev->beacon_data_mask |= BIT(bcn_idx) &
9116     - dev->beacon_mask;
9117     + dev->beacon_data_mask |= BIT(bcn_idx);
9118     } else {
9119     dev->beacon_data_mask &= ~BIT(bcn_idx);
9120     for (i = 0; i < beacon_len; i += 4)
9121     diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c
9122     index 1428cfdee579..9594433234cc 100644
9123     --- a/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c
9124     +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_usb.c
9125     @@ -107,16 +107,24 @@ static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
9126     mt76u_mcu_complete_urb,
9127     &usb->mcu.cmpl);
9128     if (err < 0)
9129     - return err;
9130     + goto err;
9131    
9132     err = mt76u_submit_rx_buffers(&dev->mt76);
9133     if (err < 0)
9134     - return err;
9135     + goto err;
9136    
9137     tasklet_enable(&usb->rx_tasklet);
9138     tasklet_enable(&usb->tx_tasklet);
9139    
9140     - return mt76x2u_init_hardware(dev);
9141     + err = mt76x2u_init_hardware(dev);
9142     + if (err < 0)
9143     + goto err;
9144     +
9145     + return 0;
9146     +
9147     +err:
9148     + mt76x2u_cleanup(dev);
9149     + return err;
9150     }
9151    
9152     MODULE_DEVICE_TABLE(usb, mt76x2u_device_table);
9153     diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
9154     index c0a163e40402..f360690396dd 100644
9155     --- a/drivers/net/wireless/rsi/rsi_91x_usb.c
9156     +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
9157     @@ -266,15 +266,17 @@ static void rsi_rx_done_handler(struct urb *urb)
9158     if (urb->status)
9159     goto out;
9160    
9161     - if (urb->actual_length <= 0) {
9162     - rsi_dbg(INFO_ZONE, "%s: Zero length packet\n", __func__);
9163     + if (urb->actual_length <= 0 ||
9164     + urb->actual_length > rx_cb->rx_skb->len) {
9165     + rsi_dbg(INFO_ZONE, "%s: Invalid packet length = %d\n",
9166     + __func__, urb->actual_length);
9167     goto out;
9168     }
9169     if (skb_queue_len(&dev->rx_q) >= RSI_MAX_RX_PKTS) {
9170     rsi_dbg(INFO_ZONE, "Max RX packets reached\n");
9171     goto out;
9172     }
9173     - skb_put(rx_cb->rx_skb, urb->actual_length);
9174     + skb_trim(rx_cb->rx_skb, urb->actual_length);
9175     skb_queue_tail(&dev->rx_q, rx_cb->rx_skb);
9176    
9177     rsi_set_event(&dev->rx_thread.event);
9178     @@ -308,6 +310,7 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num)
9179     if (!skb)
9180     return -ENOMEM;
9181     skb_reserve(skb, MAX_DWORD_ALIGN_BYTES);
9182     + skb_put(skb, RSI_MAX_RX_USB_PKT_SIZE - MAX_DWORD_ALIGN_BYTES);
9183     dword_align_bytes = (unsigned long)skb->data & 0x3f;
9184     if (dword_align_bytes > 0)
9185     skb_push(skb, dword_align_bytes);
9186     @@ -319,7 +322,7 @@ static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num)
9187     usb_rcvbulkpipe(dev->usbdev,
9188     dev->bulkin_endpoint_addr[ep_num - 1]),
9189     urb->transfer_buffer,
9190     - RSI_MAX_RX_USB_PKT_SIZE,
9191     + skb->len,
9192     rsi_rx_done_handler,
9193     rx_cb);
9194    
9195     diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
9196     index 89b0d0fade9f..19e3c5a0b715 100644
9197     --- a/drivers/net/wireless/ti/wlcore/main.c
9198     +++ b/drivers/net/wireless/ti/wlcore/main.c
9199     @@ -957,6 +957,8 @@ static void wl1271_recovery_work(struct work_struct *work)
9200     BUG_ON(wl->conf.recovery.bug_on_recovery &&
9201     !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
9202    
9203     + clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
9204     +
9205     if (wl->conf.recovery.no_recovery) {
9206     wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
9207     goto out_unlock;
9208     @@ -6710,6 +6712,7 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev)
9209     int ret;
9210     unsigned long start_time = jiffies;
9211     bool pending = false;
9212     + bool recovery = false;
9213    
9214     /* Nothing to do if no ELP mode requested */
9215     if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
9216     @@ -6726,7 +6729,7 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev)
9217    
9218     ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
9219     if (ret < 0) {
9220     - wl12xx_queue_recovery_work(wl);
9221     + recovery = true;
9222     goto err;
9223     }
9224    
9225     @@ -6734,11 +6737,12 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev)
9226     ret = wait_for_completion_timeout(&compl,
9227     msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
9228     if (ret == 0) {
9229     - wl1271_error("ELP wakeup timeout!");
9230     - wl12xx_queue_recovery_work(wl);
9231     + wl1271_warning("ELP wakeup timeout!");
9232    
9233     /* Return no error for runtime PM for recovery */
9234     - return 0;
9235     + ret = 0;
9236     + recovery = true;
9237     + goto err;
9238     }
9239     }
9240    
9241     @@ -6753,6 +6757,12 @@ err:
9242     spin_lock_irqsave(&wl->wl_lock, flags);
9243     wl->elp_compl = NULL;
9244     spin_unlock_irqrestore(&wl->wl_lock, flags);
9245     +
9246     + if (recovery) {
9247     + set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
9248     + wl12xx_queue_recovery_work(wl);
9249     + }
9250     +
9251     return ret;
9252     }
9253    
9254     diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
9255     index 91162f8e0366..9a22056e8d9e 100644
9256     --- a/drivers/nfc/nfcmrvl/uart.c
9257     +++ b/drivers/nfc/nfcmrvl/uart.c
9258     @@ -73,10 +73,9 @@ static int nfcmrvl_uart_parse_dt(struct device_node *node,
9259     struct device_node *matched_node;
9260     int ret;
9261    
9262     - matched_node = of_find_compatible_node(node, NULL, "marvell,nfc-uart");
9263     + matched_node = of_get_compatible_child(node, "marvell,nfc-uart");
9264     if (!matched_node) {
9265     - matched_node = of_find_compatible_node(node, NULL,
9266     - "mrvl,nfc-uart");
9267     + matched_node = of_get_compatible_child(node, "mrvl,nfc-uart");
9268     if (!matched_node)
9269     return -ENODEV;
9270     }
9271     diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
9272     index 8aae6dcc839f..9148015ed803 100644
9273     --- a/drivers/nvdimm/bus.c
9274     +++ b/drivers/nvdimm/bus.c
9275     @@ -488,6 +488,8 @@ static void nd_async_device_register(void *d, async_cookie_t cookie)
9276     put_device(dev);
9277     }
9278     put_device(dev);
9279     + if (dev->parent)
9280     + put_device(dev->parent);
9281     }
9282    
9283     static void nd_async_device_unregister(void *d, async_cookie_t cookie)
9284     @@ -507,6 +509,8 @@ void __nd_device_register(struct device *dev)
9285     if (!dev)
9286     return;
9287     dev->bus = &nvdimm_bus_type;
9288     + if (dev->parent)
9289     + get_device(dev->parent);
9290     get_device(dev);
9291     async_schedule_domain(nd_async_device_register, dev,
9292     &nd_async_domain);
9293     diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
9294     index 6071e2942053..2082ae01b9c8 100644
9295     --- a/drivers/nvdimm/pmem.c
9296     +++ b/drivers/nvdimm/pmem.c
9297     @@ -421,9 +421,11 @@ static int pmem_attach_disk(struct device *dev,
9298     addr = devm_memremap_pages(dev, &pmem->pgmap);
9299     pmem->pfn_flags |= PFN_MAP;
9300     memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
9301     - } else
9302     + } else {
9303     addr = devm_memremap(dev, pmem->phys_addr,
9304     pmem->size, ARCH_MEMREMAP_PMEM);
9305     + memcpy(&bb_res, &nsio->res, sizeof(bb_res));
9306     + }
9307    
9308     /*
9309     * At release time the queue must be frozen before
9310     diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
9311     index fa37afcd43ff..174a418cb171 100644
9312     --- a/drivers/nvdimm/region_devs.c
9313     +++ b/drivers/nvdimm/region_devs.c
9314     @@ -560,10 +560,17 @@ static ssize_t region_badblocks_show(struct device *dev,
9315     struct device_attribute *attr, char *buf)
9316     {
9317     struct nd_region *nd_region = to_nd_region(dev);
9318     + ssize_t rc;
9319    
9320     - return badblocks_show(&nd_region->bb, buf, 0);
9321     -}
9322     + device_lock(dev);
9323     + if (dev->driver)
9324     + rc = badblocks_show(&nd_region->bb, buf, 0);
9325     + else
9326     + rc = -ENXIO;
9327     + device_unlock(dev);
9328    
9329     + return rc;
9330     +}
9331     static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);
9332    
9333     static ssize_t resource_show(struct device *dev,
9334     diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
9335     index 206d63cb1afc..bcd09d3a44da 100644
9336     --- a/drivers/nvme/host/fabrics.c
9337     +++ b/drivers/nvme/host/fabrics.c
9338     @@ -552,8 +552,11 @@ blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
9339     ctrl->state != NVME_CTRL_DEAD &&
9340     !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
9341     return BLK_STS_RESOURCE;
9342     - nvme_req(rq)->status = NVME_SC_ABORT_REQ;
9343     - return BLK_STS_IOERR;
9344     +
9345     + nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR;
9346     + blk_mq_start_request(rq);
9347     + nvme_complete_rq(rq);
9348     + return BLK_STS_OK;
9349     }
9350     EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
9351    
9352     diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
9353     index 9fe3fff818b8..b71c9ad1bf45 100644
9354     --- a/drivers/nvme/host/multipath.c
9355     +++ b/drivers/nvme/host/multipath.c
9356     @@ -77,6 +77,13 @@ void nvme_failover_req(struct request *req)
9357     queue_work(nvme_wq, &ns->ctrl->ana_work);
9358     }
9359     break;
9360     + case NVME_SC_HOST_PATH_ERROR:
9361     + /*
9362     + * Temporary transport disruption in talking to the controller.
9363     + * Try to send on a new path.
9364     + */
9365     + nvme_mpath_clear_current_path(ns);
9366     + break;
9367     default:
9368     /*
9369     * Reset the controller for any non-ANA error as we don't know
9370     diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
9371     index aa1657831b70..7c530c88b3fb 100644
9372     --- a/drivers/nvmem/core.c
9373     +++ b/drivers/nvmem/core.c
9374     @@ -516,11 +516,17 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
9375     goto err_device_del;
9376     }
9377    
9378     - if (config->cells)
9379     - nvmem_add_cells(nvmem, config->cells, config->ncells);
9380     + if (config->cells) {
9381     + rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
9382     + if (rval)
9383     + goto err_teardown_compat;
9384     + }
9385    
9386     return nvmem;
9387    
9388     +err_teardown_compat:
9389     + if (config->compat)
9390     + device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
9391     err_device_del:
9392     device_del(&nvmem->dev);
9393     err_put_device:
9394     diff --git a/drivers/of/base.c b/drivers/of/base.c
9395     index 74eaedd5b860..70f5fd08891b 100644
9396     --- a/drivers/of/base.c
9397     +++ b/drivers/of/base.c
9398     @@ -67,6 +67,7 @@ bool of_node_name_eq(const struct device_node *np, const char *name)
9399    
9400     return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
9401     }
9402     +EXPORT_SYMBOL(of_node_name_eq);
9403    
9404     bool of_node_name_prefix(const struct device_node *np, const char *prefix)
9405     {
9406     @@ -75,6 +76,7 @@ bool of_node_name_prefix(const struct device_node *np, const char *prefix)
9407    
9408     return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
9409     }
9410     +EXPORT_SYMBOL(of_node_name_prefix);
9411    
9412     int of_n_addr_cells(struct device_node *np)
9413     {
9414     diff --git a/drivers/opp/of.c b/drivers/opp/of.c
9415     index 7af0ddec936b..20988c426650 100644
9416     --- a/drivers/opp/of.c
9417     +++ b/drivers/opp/of.c
9418     @@ -425,6 +425,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
9419     dev_err(dev, "Not all nodes have performance state set (%d: %d)\n",
9420     count, pstate_count);
9421     ret = -ENOENT;
9422     + _dev_pm_opp_remove_table(opp_table, dev, false);
9423     goto put_opp_table;
9424     }
9425    
9426     diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
9427     index ce9224a36f62..a32d6dde7a57 100644
9428     --- a/drivers/pci/controller/dwc/pci-dra7xx.c
9429     +++ b/drivers/pci/controller/dwc/pci-dra7xx.c
9430     @@ -542,7 +542,7 @@ static const struct of_device_id of_dra7xx_pcie_match[] = {
9431     };
9432    
9433     /*
9434     - * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
9435     + * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
9436     * @dra7xx: the dra7xx device where the workaround should be applied
9437     *
9438     * Access to the PCIe slave port that are not 32-bit aligned will result
9439     @@ -552,7 +552,7 @@ static const struct of_device_id of_dra7xx_pcie_match[] = {
9440     *
9441     * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
9442     */
9443     -static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev)
9444     +static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
9445     {
9446     int ret;
9447     struct device_node *np = dev->of_node;
9448     @@ -704,6 +704,11 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
9449    
9450     dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
9451     DEVICE_TYPE_RC);
9452     +
9453     + ret = dra7xx_pcie_unaligned_memaccess(dev);
9454     + if (ret)
9455     + dev_err(dev, "WA for Errata i870 not applied\n");
9456     +
9457     ret = dra7xx_add_pcie_port(dra7xx, pdev);
9458     if (ret < 0)
9459     goto err_gpio;
9460     @@ -717,7 +722,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
9461     dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
9462     DEVICE_TYPE_EP);
9463    
9464     - ret = dra7xx_pcie_ep_unaligned_memaccess(dev);
9465     + ret = dra7xx_pcie_unaligned_memaccess(dev);
9466     if (ret)
9467     goto err_gpio;
9468    
9469     diff --git a/drivers/pci/controller/pcie-cadence-ep.c b/drivers/pci/controller/pcie-cadence-ep.c
9470     index 9e87dd7f9ac3..6692654798d4 100644
9471     --- a/drivers/pci/controller/pcie-cadence-ep.c
9472     +++ b/drivers/pci/controller/pcie-cadence-ep.c
9473     @@ -258,7 +258,6 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
9474     u8 intx, bool is_asserted)
9475     {
9476     struct cdns_pcie *pcie = &ep->pcie;
9477     - u32 r = ep->max_regions - 1;
9478     u32 offset;
9479     u16 status;
9480     u8 msg_code;
9481     @@ -268,8 +267,8 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
9482     /* Set the outbound region if needed. */
9483     if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
9484     ep->irq_pci_fn != fn)) {
9485     - /* Last region was reserved for IRQ writes. */
9486     - cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, r,
9487     + /* First region was reserved for IRQ writes. */
9488     + cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, 0,
9489     ep->irq_phys_addr);
9490     ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
9491     ep->irq_pci_fn = fn;
9492     @@ -347,8 +346,8 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
9493     /* Set the outbound region if needed. */
9494     if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
9495     ep->irq_pci_fn != fn)) {
9496     - /* Last region was reserved for IRQ writes. */
9497     - cdns_pcie_set_outbound_region(pcie, fn, ep->max_regions - 1,
9498     + /* First region was reserved for IRQ writes. */
9499     + cdns_pcie_set_outbound_region(pcie, fn, 0,
9500     false,
9501     ep->irq_phys_addr,
9502     pci_addr & ~pci_addr_mask,
9503     @@ -517,6 +516,8 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
9504     goto free_epc_mem;
9505     }
9506     ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
9507     + /* Reserve region 0 for IRQs */
9508     + set_bit(0, &ep->ob_region_map);
9509    
9510     return 0;
9511    
9512     diff --git a/drivers/pci/controller/pcie-cadence.c b/drivers/pci/controller/pcie-cadence.c
9513     index 975bcdd6b5c0..cd795f6fc1e2 100644
9514     --- a/drivers/pci/controller/pcie-cadence.c
9515     +++ b/drivers/pci/controller/pcie-cadence.c
9516     @@ -190,14 +190,16 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
9517    
9518     for (i = 0; i < phy_count; i++) {
9519     of_property_read_string_index(np, "phy-names", i, &name);
9520     - phy[i] = devm_phy_optional_get(dev, name);
9521     - if (IS_ERR(phy))
9522     - return PTR_ERR(phy);
9523     -
9524     + phy[i] = devm_phy_get(dev, name);
9525     + if (IS_ERR(phy[i])) {
9526     + ret = PTR_ERR(phy[i]);
9527     + goto err_phy;
9528     + }
9529     link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
9530     if (!link[i]) {
9531     + devm_phy_put(dev, phy[i]);
9532     ret = -EINVAL;
9533     - goto err_link;
9534     + goto err_phy;
9535     }
9536     }
9537    
9538     @@ -207,13 +209,15 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
9539    
9540     ret = cdns_pcie_enable_phy(pcie);
9541     if (ret)
9542     - goto err_link;
9543     + goto err_phy;
9544    
9545     return 0;
9546    
9547     -err_link:
9548     - while (--i >= 0)
9549     +err_phy:
9550     + while (--i >= 0) {
9551     device_link_del(link[i]);
9552     + devm_phy_put(dev, phy[i]);
9553     + }
9554    
9555     return ret;
9556     }
9557     diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
9558     index 861dda69f366..c5ff6ca65eab 100644
9559     --- a/drivers/pci/controller/pcie-mediatek.c
9560     +++ b/drivers/pci/controller/pcie-mediatek.c
9561     @@ -337,6 +337,17 @@ static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
9562     {
9563     struct mtk_pcie *pcie = bus->sysdata;
9564     struct mtk_pcie_port *port;
9565     + struct pci_dev *dev = NULL;
9566     +
9567     + /*
9568     + * Walk the bus hierarchy to get the devfn value
9569     + * of the port in the root bus.
9570     + */
9571     + while (bus && bus->number) {
9572     + dev = bus->self;
9573     + bus = dev->bus;
9574     + devfn = dev->devfn;
9575     + }
9576    
9577     list_for_each_entry(port, &pcie->ports, list)
9578     if (port->slot == PCI_SLOT(devfn))
9579     diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
9580     index f2ef896464b3..af24ed50a245 100644
9581     --- a/drivers/pci/msi.c
9582     +++ b/drivers/pci/msi.c
9583     @@ -958,7 +958,6 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
9584     }
9585     }
9586     }
9587     - WARN_ON(!!dev->msix_enabled);
9588    
9589     /* Check whether driver already requested for MSI irq */
9590     if (dev->msi_enabled) {
9591     @@ -1028,8 +1027,6 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
9592     if (!pci_msi_supported(dev, minvec))
9593     return -EINVAL;
9594    
9595     - WARN_ON(!!dev->msi_enabled);
9596     -
9597     /* Check whether driver already requested MSI-X irqs */
9598     if (dev->msix_enabled) {
9599     pci_info(dev, "can't enable MSI (MSI-X already enabled)\n");
9600     @@ -1039,6 +1036,9 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
9601     if (maxvec < minvec)
9602     return -ERANGE;
9603    
9604     + if (WARN_ON_ONCE(dev->msi_enabled))
9605     + return -EINVAL;
9606     +
9607     nvec = pci_msi_vec_count(dev);
9608     if (nvec < 0)
9609     return nvec;
9610     @@ -1087,6 +1087,9 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
9611     if (maxvec < minvec)
9612     return -ERANGE;
9613    
9614     + if (WARN_ON_ONCE(dev->msix_enabled))
9615     + return -EINVAL;
9616     +
9617     for (;;) {
9618     if (affd) {
9619     nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
9620     diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
9621     index c2ab57705043..f8436d1c4d45 100644
9622     --- a/drivers/pci/pci-acpi.c
9623     +++ b/drivers/pci/pci-acpi.c
9624     @@ -762,19 +762,33 @@ static void pci_acpi_setup(struct device *dev)
9625     return;
9626    
9627     device_set_wakeup_capable(dev, true);
9628     + /*
9629     + * For bridges that can do D3 we enable wake automatically (as
9630     + * we do for the power management itself in that case). The
9631     + * reason is that the bridge may have additional methods such as
9632     + * _DSW that need to be called.
9633     + */
9634     + if (pci_dev->bridge_d3)
9635     + device_wakeup_enable(dev);
9636     +
9637     acpi_pci_wakeup(pci_dev, false);
9638     }
9639    
9640     static void pci_acpi_cleanup(struct device *dev)
9641     {
9642     struct acpi_device *adev = ACPI_COMPANION(dev);
9643     + struct pci_dev *pci_dev = to_pci_dev(dev);
9644    
9645     if (!adev)
9646     return;
9647    
9648     pci_acpi_remove_pm_notifier(adev);
9649     - if (adev->wakeup.flags.valid)
9650     + if (adev->wakeup.flags.valid) {
9651     + if (pci_dev->bridge_d3)
9652     + device_wakeup_disable(dev);
9653     +
9654     device_set_wakeup_capable(dev, false);
9655     + }
9656     }
9657    
9658     static bool pci_acpi_bus_match(struct device *dev)
9659     diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
9660     index 5326916715d2..f78860ce884b 100644
9661     --- a/drivers/pci/pcie/aspm.c
9662     +++ b/drivers/pci/pcie/aspm.c
9663     @@ -991,7 +991,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
9664     * All PCIe functions are in one slot, remove one function will remove
9665     * the whole slot, so just wait until we are the last function left.
9666     */
9667     - if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices))
9668     + if (!list_empty(&parent->subordinate->devices))
9669     goto out;
9670    
9671     link = parent->link_state;
9672     diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
9673     index 6bc27b7fd452..c0673a717239 100644
9674     --- a/drivers/pci/quirks.c
9675     +++ b/drivers/pci/quirks.c
9676     @@ -3190,7 +3190,11 @@ static void disable_igfx_irq(struct pci_dev *dev)
9677    
9678     pci_iounmap(dev, regs);
9679     }
9680     +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0042, disable_igfx_irq);
9681     +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0046, disable_igfx_irq);
9682     +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x004a, disable_igfx_irq);
9683     DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
9684     +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0106, disable_igfx_irq);
9685     DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
9686     DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
9687    
9688     diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
9689     index 461e7fd2756f..e9c6b120cf45 100644
9690     --- a/drivers/pci/remove.c
9691     +++ b/drivers/pci/remove.c
9692     @@ -25,9 +25,6 @@ static void pci_stop_dev(struct pci_dev *dev)
9693    
9694     pci_dev_assign_added(dev, false);
9695     }
9696     -
9697     - if (dev->bus->self)
9698     - pcie_aspm_exit_link_state(dev);
9699     }
9700    
9701     static void pci_destroy_dev(struct pci_dev *dev)
9702     @@ -41,6 +38,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
9703     list_del(&dev->bus_list);
9704     up_write(&pci_bus_sem);
9705    
9706     + pcie_aspm_exit_link_state(dev);
9707     pci_bridge_d3_update(dev);
9708     pci_free_resources(dev);
9709     put_device(&dev->dev);
9710     diff --git a/drivers/pcmcia/ricoh.h b/drivers/pcmcia/ricoh.h
9711     index 01098c841f87..8ac7b138c094 100644
9712     --- a/drivers/pcmcia/ricoh.h
9713     +++ b/drivers/pcmcia/ricoh.h
9714     @@ -119,6 +119,10 @@
9715     #define RL5C4XX_MISC_CONTROL 0x2F /* 8 bit */
9716     #define RL5C4XX_ZV_ENABLE 0x08
9717    
9718     +/* Misc Control 3 Register */
9719     +#define RL5C4XX_MISC3 0x00A2 /* 16 bit */
9720     +#define RL5C47X_MISC3_CB_CLKRUN_DIS BIT(1)
9721     +
9722     #ifdef __YENTA_H
9723    
9724     #define rl_misc(socket) ((socket)->private[0])
9725     @@ -156,6 +160,35 @@ static void ricoh_set_zv(struct yenta_socket *socket)
9726     }
9727     }
9728    
9729     +static void ricoh_set_clkrun(struct yenta_socket *socket, bool quiet)
9730     +{
9731     + u16 misc3;
9732     +
9733     + /*
9734     + * RL5C475II likely has this setting, too, however no datasheet
9735     + * is publicly available for this chip
9736     + */
9737     + if (socket->dev->device != PCI_DEVICE_ID_RICOH_RL5C476 &&
9738     + socket->dev->device != PCI_DEVICE_ID_RICOH_RL5C478)
9739     + return;
9740     +
9741     + if (socket->dev->revision < 0x80)
9742     + return;
9743     +
9744     + misc3 = config_readw(socket, RL5C4XX_MISC3);
9745     + if (misc3 & RL5C47X_MISC3_CB_CLKRUN_DIS) {
9746     + if (!quiet)
9747     + dev_dbg(&socket->dev->dev,
9748     + "CLKRUN feature already disabled\n");
9749     + } else if (disable_clkrun) {
9750     + if (!quiet)
9751     + dev_info(&socket->dev->dev,
9752     + "Disabling CLKRUN feature\n");
9753     + misc3 |= RL5C47X_MISC3_CB_CLKRUN_DIS;
9754     + config_writew(socket, RL5C4XX_MISC3, misc3);
9755     + }
9756     +}
9757     +
9758     static void ricoh_save_state(struct yenta_socket *socket)
9759     {
9760     rl_misc(socket) = config_readw(socket, RL5C4XX_MISC);
9761     @@ -172,6 +205,7 @@ static void ricoh_restore_state(struct yenta_socket *socket)
9762     config_writew(socket, RL5C4XX_16BIT_IO_0, rl_io(socket));
9763     config_writew(socket, RL5C4XX_16BIT_MEM_0, rl_mem(socket));
9764     config_writew(socket, RL5C4XX_CONFIG, rl_config(socket));
9765     + ricoh_set_clkrun(socket, true);
9766     }
9767    
9768    
9769     @@ -197,6 +231,7 @@ static int ricoh_override(struct yenta_socket *socket)
9770     config_writew(socket, RL5C4XX_CONFIG, config);
9771    
9772     ricoh_set_zv(socket);
9773     + ricoh_set_clkrun(socket, false);
9774    
9775     return 0;
9776     }
9777     diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
9778     index ab3da2262f0f..ac6a3f46b1e6 100644
9779     --- a/drivers/pcmcia/yenta_socket.c
9780     +++ b/drivers/pcmcia/yenta_socket.c
9781     @@ -26,7 +26,8 @@
9782    
9783     static bool disable_clkrun;
9784     module_param(disable_clkrun, bool, 0444);
9785     -MODULE_PARM_DESC(disable_clkrun, "If PC card doesn't function properly, please try this option");
9786     +MODULE_PARM_DESC(disable_clkrun,
9787     + "If PC card doesn't function properly, please try this option (TI and Ricoh bridges only)");
9788    
9789     static bool isa_probe = 1;
9790     module_param(isa_probe, bool, 0444);
9791     diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
9792     index 6556dbeae65e..ac251c62bc66 100644
9793     --- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
9794     +++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
9795     @@ -319,6 +319,8 @@ static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function,
9796     pad->function = function;
9797    
9798     ret = pmic_mpp_write_mode_ctl(state, pad);
9799     + if (ret < 0)
9800     + return ret;
9801    
9802     val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;
9803    
9804     @@ -343,13 +345,12 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
9805    
9806     switch (param) {
9807     case PIN_CONFIG_BIAS_DISABLE:
9808     - arg = pad->pullup == PMIC_MPP_PULL_UP_OPEN;
9809     + if (pad->pullup != PMIC_MPP_PULL_UP_OPEN)
9810     + return -EINVAL;
9811     + arg = 1;
9812     break;
9813     case PIN_CONFIG_BIAS_PULL_UP:
9814     switch (pad->pullup) {
9815     - case PMIC_MPP_PULL_UP_OPEN:
9816     - arg = 0;
9817     - break;
9818     case PMIC_MPP_PULL_UP_0P6KOHM:
9819     arg = 600;
9820     break;
9821     @@ -364,13 +365,17 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
9822     }
9823     break;
9824     case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
9825     - arg = !pad->is_enabled;
9826     + if (pad->is_enabled)
9827     + return -EINVAL;
9828     + arg = 1;
9829     break;
9830     case PIN_CONFIG_POWER_SOURCE:
9831     arg = pad->power_source;
9832     break;
9833     case PIN_CONFIG_INPUT_ENABLE:
9834     - arg = pad->input_enabled;
9835     + if (!pad->input_enabled)
9836     + return -EINVAL;
9837     + arg = 1;
9838     break;
9839     case PIN_CONFIG_OUTPUT:
9840     arg = pad->out_value;
9841     @@ -382,7 +387,9 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
9842     arg = pad->amux_input;
9843     break;
9844     case PMIC_MPP_CONF_PAIRED:
9845     - arg = pad->paired;
9846     + if (!pad->paired)
9847     + return -EINVAL;
9848     + arg = 1;
9849     break;
9850     case PIN_CONFIG_DRIVE_STRENGTH:
9851     arg = pad->drive_strength;
9852     @@ -455,7 +462,7 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
9853     pad->dtest = arg;
9854     break;
9855     case PIN_CONFIG_DRIVE_STRENGTH:
9856     - arg = pad->drive_strength;
9857     + pad->drive_strength = arg;
9858     break;
9859     case PMIC_MPP_CONF_AMUX_ROUTE:
9860     if (arg >= PMIC_MPP_AMUX_ROUTE_ABUS4)
9861     @@ -502,6 +509,10 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
9862     if (ret < 0)
9863     return ret;
9864    
9865     + ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_SINK_CTL, pad->drive_strength);
9866     + if (ret < 0)
9867     + return ret;
9868     +
9869     val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;
9870    
9871     return pmic_mpp_write(state, pad, PMIC_MPP_REG_EN_CTL, val);
9872     diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
9873     index f53e32a9d8fc..0e153bae322e 100644
9874     --- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
9875     +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
9876     @@ -260,22 +260,32 @@ static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
9877    
9878     switch (param) {
9879     case PIN_CONFIG_BIAS_DISABLE:
9880     - arg = pin->bias == PM8XXX_GPIO_BIAS_NP;
9881     + if (pin->bias != PM8XXX_GPIO_BIAS_NP)
9882     + return -EINVAL;
9883     + arg = 1;
9884     break;
9885     case PIN_CONFIG_BIAS_PULL_DOWN:
9886     - arg = pin->bias == PM8XXX_GPIO_BIAS_PD;
9887     + if (pin->bias != PM8XXX_GPIO_BIAS_PD)
9888     + return -EINVAL;
9889     + arg = 1;
9890     break;
9891     case PIN_CONFIG_BIAS_PULL_UP:
9892     - arg = pin->bias <= PM8XXX_GPIO_BIAS_PU_1P5_30;
9893     + if (pin->bias > PM8XXX_GPIO_BIAS_PU_1P5_30)
9894     + return -EINVAL;
9895     + arg = 1;
9896     break;
9897     case PM8XXX_QCOM_PULL_UP_STRENGTH:
9898     arg = pin->pull_up_strength;
9899     break;
9900     case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
9901     - arg = pin->disable;
9902     + if (!pin->disable)
9903     + return -EINVAL;
9904     + arg = 1;
9905     break;
9906     case PIN_CONFIG_INPUT_ENABLE:
9907     - arg = pin->mode == PM8XXX_GPIO_MODE_INPUT;
9908     + if (pin->mode != PM8XXX_GPIO_MODE_INPUT)
9909     + return -EINVAL;
9910     + arg = 1;
9911     break;
9912     case PIN_CONFIG_OUTPUT:
9913     if (pin->mode & PM8XXX_GPIO_MODE_OUTPUT)
9914     @@ -290,10 +300,14 @@ static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
9915     arg = pin->output_strength;
9916     break;
9917     case PIN_CONFIG_DRIVE_PUSH_PULL:
9918     - arg = !pin->open_drain;
9919     + if (pin->open_drain)
9920     + return -EINVAL;
9921     + arg = 1;
9922     break;
9923     case PIN_CONFIG_DRIVE_OPEN_DRAIN:
9924     - arg = pin->open_drain;
9925     + if (!pin->open_drain)
9926     + return -EINVAL;
9927     + arg = 1;
9928     break;
9929     default:
9930     return -EINVAL;
9931     diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
9932     index 4d9bf9b3e9f3..26ebedc1f6d3 100644
9933     --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
9934     +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
9935     @@ -1079,10 +1079,9 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
9936     * We suppose that we won't have any more functions than pins,
9937     * we'll reallocate that later anyway
9938     */
9939     - pctl->functions = devm_kcalloc(&pdev->dev,
9940     - pctl->ngroups,
9941     - sizeof(*pctl->functions),
9942     - GFP_KERNEL);
9943     + pctl->functions = kcalloc(pctl->ngroups,
9944     + sizeof(*pctl->functions),
9945     + GFP_KERNEL);
9946     if (!pctl->functions)
9947     return -ENOMEM;
9948    
9949     @@ -1133,8 +1132,10 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
9950    
9951     func_item = sunxi_pinctrl_find_function_by_name(pctl,
9952     func->name);
9953     - if (!func_item)
9954     + if (!func_item) {
9955     + kfree(pctl->functions);
9956     return -EINVAL;
9957     + }
9958    
9959     if (!func_item->groups) {
9960     func_item->groups =
9961     @@ -1142,8 +1143,10 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
9962     func_item->ngroups,
9963     sizeof(*func_item->groups),
9964     GFP_KERNEL);
9965     - if (!func_item->groups)
9966     + if (!func_item->groups) {
9967     + kfree(pctl->functions);
9968     return -ENOMEM;
9969     + }
9970     }
9971    
9972     func_grp = func_item->groups;
9973     diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c
9974     index bbcaee56db9d..b6a7d9f74cf3 100644
9975     --- a/drivers/power/supply/twl4030_charger.c
9976     +++ b/drivers/power/supply/twl4030_charger.c
9977     @@ -996,12 +996,13 @@ static int twl4030_bci_probe(struct platform_device *pdev)
9978     if (bci->dev->of_node) {
9979     struct device_node *phynode;
9980    
9981     - phynode = of_find_compatible_node(bci->dev->of_node->parent,
9982     - NULL, "ti,twl4030-usb");
9983     + phynode = of_get_compatible_child(bci->dev->of_node->parent,
9984     + "ti,twl4030-usb");
9985     if (phynode) {
9986     bci->usb_nb.notifier_call = twl4030_bci_usb_ncb;
9987     bci->transceiver = devm_usb_get_phy_by_node(
9988     bci->dev, phynode, &bci->usb_nb);
9989     + of_node_put(phynode);
9990     if (IS_ERR(bci->transceiver)) {
9991     ret = PTR_ERR(bci->transceiver);
9992     if (ret == -EPROBE_DEFER)
9993     diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
9994     index 61a760ee4aac..e9ab90c19304 100644
9995     --- a/drivers/remoteproc/qcom_q6v5.c
9996     +++ b/drivers/remoteproc/qcom_q6v5.c
9997     @@ -198,6 +198,9 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
9998     }
9999    
10000     q6v5->fatal_irq = platform_get_irq_byname(pdev, "fatal");
10001     + if (q6v5->fatal_irq == -EPROBE_DEFER)
10002     + return -EPROBE_DEFER;
10003     +
10004     ret = devm_request_threaded_irq(&pdev->dev, q6v5->fatal_irq,
10005     NULL, q6v5_fatal_interrupt,
10006     IRQF_TRIGGER_RISING | IRQF_ONESHOT,
10007     @@ -208,6 +211,9 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
10008     }
10009    
10010     q6v5->ready_irq = platform_get_irq_byname(pdev, "ready");
10011     + if (q6v5->ready_irq == -EPROBE_DEFER)
10012     + return -EPROBE_DEFER;
10013     +
10014     ret = devm_request_threaded_irq(&pdev->dev, q6v5->ready_irq,
10015     NULL, q6v5_ready_interrupt,
10016     IRQF_TRIGGER_RISING | IRQF_ONESHOT,
10017     @@ -218,6 +224,9 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
10018     }
10019    
10020     q6v5->handover_irq = platform_get_irq_byname(pdev, "handover");
10021     + if (q6v5->handover_irq == -EPROBE_DEFER)
10022     + return -EPROBE_DEFER;
10023     +
10024     ret = devm_request_threaded_irq(&pdev->dev, q6v5->handover_irq,
10025     NULL, q6v5_handover_interrupt,
10026     IRQF_TRIGGER_RISING | IRQF_ONESHOT,
10027     @@ -229,6 +238,9 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
10028     disable_irq(q6v5->handover_irq);
10029    
10030     q6v5->stop_irq = platform_get_irq_byname(pdev, "stop-ack");
10031     + if (q6v5->stop_irq == -EPROBE_DEFER)
10032     + return -EPROBE_DEFER;
10033     +
10034     ret = devm_request_threaded_irq(&pdev->dev, q6v5->stop_irq,
10035     NULL, q6v5_stop_interrupt,
10036     IRQF_TRIGGER_RISING | IRQF_ONESHOT,
10037     diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
10038     index 8da83a4ebadc..b2e5a6abf7d5 100644
10039     --- a/drivers/rpmsg/qcom_smd.c
10040     +++ b/drivers/rpmsg/qcom_smd.c
10041     @@ -1122,8 +1122,10 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
10042    
10043     channel->edge = edge;
10044     channel->name = kstrdup(name, GFP_KERNEL);
10045     - if (!channel->name)
10046     - return ERR_PTR(-ENOMEM);
10047     + if (!channel->name) {
10048     + ret = -ENOMEM;
10049     + goto free_channel;
10050     + }
10051    
10052     spin_lock_init(&channel->tx_lock);
10053     spin_lock_init(&channel->recv_lock);
10054     @@ -1173,6 +1175,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
10055    
10056     free_name_and_channel:
10057     kfree(channel->name);
10058     +free_channel:
10059     kfree(channel);
10060    
10061     return ERR_PTR(ret);
10062     diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
10063     index cd3a2411bc2f..df0c5776d49b 100644
10064     --- a/drivers/rtc/rtc-cmos.c
10065     +++ b/drivers/rtc/rtc-cmos.c
10066     @@ -50,6 +50,7 @@
10067     /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
10068     #include <linux/mc146818rtc.h>
10069    
10070     +#ifdef CONFIG_ACPI
10071     /*
10072     * Use ACPI SCI to replace HPET interrupt for RTC Alarm event
10073     *
10074     @@ -61,6 +62,18 @@
10075     static bool use_acpi_alarm;
10076     module_param(use_acpi_alarm, bool, 0444);
10077    
10078     +static inline int cmos_use_acpi_alarm(void)
10079     +{
10080     + return use_acpi_alarm;
10081     +}
10082     +#else /* !CONFIG_ACPI */
10083     +
10084     +static inline int cmos_use_acpi_alarm(void)
10085     +{
10086     + return 0;
10087     +}
10088     +#endif
10089     +
10090     struct cmos_rtc {
10091     struct rtc_device *rtc;
10092     struct device *dev;
10093     @@ -167,9 +180,9 @@ static inline int hpet_unregister_irq_handler(irq_handler_t handler)
10094     #endif
10095    
10096     /* Don't use HPET for RTC Alarm event if ACPI Fixed event is used */
10097     -static int use_hpet_alarm(void)
10098     +static inline int use_hpet_alarm(void)
10099     {
10100     - return is_hpet_enabled() && !use_acpi_alarm;
10101     + return is_hpet_enabled() && !cmos_use_acpi_alarm();
10102     }
10103    
10104     /*----------------------------------------------------------------*/
10105     @@ -340,7 +353,7 @@ static void cmos_irq_enable(struct cmos_rtc *cmos, unsigned char mask)
10106     if (use_hpet_alarm())
10107     hpet_set_rtc_irq_bit(mask);
10108    
10109     - if ((mask & RTC_AIE) && use_acpi_alarm) {
10110     + if ((mask & RTC_AIE) && cmos_use_acpi_alarm()) {
10111     if (cmos->wake_on)
10112     cmos->wake_on(cmos->dev);
10113     }
10114     @@ -358,7 +371,7 @@ static void cmos_irq_disable(struct cmos_rtc *cmos, unsigned char mask)
10115     if (use_hpet_alarm())
10116     hpet_mask_rtc_irq_bit(mask);
10117    
10118     - if ((mask & RTC_AIE) && use_acpi_alarm) {
10119     + if ((mask & RTC_AIE) && cmos_use_acpi_alarm()) {
10120     if (cmos->wake_off)
10121     cmos->wake_off(cmos->dev);
10122     }
10123     @@ -980,7 +993,7 @@ static int cmos_suspend(struct device *dev)
10124     }
10125     spin_unlock_irq(&rtc_lock);
10126    
10127     - if ((tmp & RTC_AIE) && !use_acpi_alarm) {
10128     + if ((tmp & RTC_AIE) && !cmos_use_acpi_alarm()) {
10129     cmos->enabled_wake = 1;
10130     if (cmos->wake_on)
10131     cmos->wake_on(dev);
10132     @@ -1031,7 +1044,7 @@ static void cmos_check_wkalrm(struct device *dev)
10133     * ACPI RTC wake event is cleared after resume from STR,
10134     * ACK the rtc irq here
10135     */
10136     - if (t_now >= cmos->alarm_expires && use_acpi_alarm) {
10137     + if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) {
10138     cmos_interrupt(0, (void *)cmos->rtc);
10139     return;
10140     }
10141     @@ -1053,7 +1066,7 @@ static int __maybe_unused cmos_resume(struct device *dev)
10142     struct cmos_rtc *cmos = dev_get_drvdata(dev);
10143     unsigned char tmp;
10144    
10145     - if (cmos->enabled_wake && !use_acpi_alarm) {
10146     + if (cmos->enabled_wake && !cmos_use_acpi_alarm()) {
10147     if (cmos->wake_off)
10148     cmos->wake_off(dev);
10149     else
10150     @@ -1132,7 +1145,7 @@ static u32 rtc_handler(void *context)
10151     * Or else, ACPI SCI is enabled during suspend/resume only,
10152     * update rtc irq in that case.
10153     */
10154     - if (use_acpi_alarm)
10155     + if (cmos_use_acpi_alarm())
10156     cmos_interrupt(0, (void *)cmos->rtc);
10157     else {
10158     /* Fix me: can we use cmos_interrupt() here as well? */
10159     diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
10160     index 4b2b4627daeb..71396b62dc52 100644
10161     --- a/drivers/rtc/rtc-ds1307.c
10162     +++ b/drivers/rtc/rtc-ds1307.c
10163     @@ -1384,7 +1384,6 @@ static void ds1307_clks_register(struct ds1307 *ds1307)
10164     static const struct regmap_config regmap_config = {
10165     .reg_bits = 8,
10166     .val_bits = 8,
10167     - .max_register = 0x9,
10168     };
10169    
10170     static int ds1307_probe(struct i2c_client *client,
10171     diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
10172     index c3fc34b9964d..9e5d3f7d29ae 100644
10173     --- a/drivers/scsi/esp_scsi.c
10174     +++ b/drivers/scsi/esp_scsi.c
10175     @@ -1338,6 +1338,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
10176    
10177     bytes_sent = esp->data_dma_len;
10178     bytes_sent -= ecount;
10179     + bytes_sent -= esp->send_cmd_residual;
10180    
10181     /*
10182     * The am53c974 has a DMA 'pecularity'. The doc states:
10183     diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
10184     index 8163dca2071b..a77772777a30 100644
10185     --- a/drivers/scsi/esp_scsi.h
10186     +++ b/drivers/scsi/esp_scsi.h
10187     @@ -540,6 +540,8 @@ struct esp {
10188    
10189     void *dma;
10190     int dmarev;
10191     +
10192     + u32 send_cmd_residual;
10193     };
10194    
10195     /* A front-end driver for the ESP chip should do the following in
10196     diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
10197     index 5c7858e735c9..200b5bca1f5f 100644
10198     --- a/drivers/scsi/lpfc/lpfc_scsi.c
10199     +++ b/drivers/scsi/lpfc/lpfc_scsi.c
10200     @@ -4158,9 +4158,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
10201     }
10202     lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
10203    
10204     - spin_lock_irqsave(&phba->hbalock, flags);
10205     - lpfc_cmd->pCmd = NULL;
10206     - spin_unlock_irqrestore(&phba->hbalock, flags);
10207     + /* If pCmd was set to NULL from abort path, do not call scsi_done */
10208     + if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) {
10209     + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
10210     + "0711 FCP cmd already NULL, sid: 0x%06x, "
10211     + "did: 0x%06x, oxid: 0x%04x\n",
10212     + vport->fc_myDID,
10213     + (pnode) ? pnode->nlp_DID : 0,
10214     + phba->sli_rev == LPFC_SLI_REV4 ?
10215     + lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff);
10216     + return;
10217     + }
10218    
10219     /* The sdev is not guaranteed to be valid post scsi_done upcall. */
10220     cmd->scsi_done(cmd);
10221     diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
10222     index 9830bdb6e072..a95c823cd1a4 100644
10223     --- a/drivers/scsi/lpfc/lpfc_sli.c
10224     +++ b/drivers/scsi/lpfc/lpfc_sli.c
10225     @@ -3797,6 +3797,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
10226     struct hbq_dmabuf *dmabuf;
10227     struct lpfc_cq_event *cq_event;
10228     unsigned long iflag;
10229     + int count = 0;
10230    
10231     spin_lock_irqsave(&phba->hbalock, iflag);
10232     phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
10233     @@ -3818,16 +3819,22 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
10234     if (irspiocbq)
10235     lpfc_sli_sp_handle_rspiocb(phba, pring,
10236     irspiocbq);
10237     + count++;
10238     break;
10239     case CQE_CODE_RECEIVE:
10240     case CQE_CODE_RECEIVE_V1:
10241     dmabuf = container_of(cq_event, struct hbq_dmabuf,
10242     cq_event);
10243     lpfc_sli4_handle_received_buffer(phba, dmabuf);
10244     + count++;
10245     break;
10246     default:
10247     break;
10248     }
10249     +
10250     + /* Limit the number of events to 64 to avoid soft lockups */
10251     + if (count == 64)
10252     + break;
10253     }
10254     }
10255    
10256     diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
10257     index eb551f3cc471..71879f2207e0 100644
10258     --- a/drivers/scsi/mac_esp.c
10259     +++ b/drivers/scsi/mac_esp.c
10260     @@ -427,6 +427,8 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
10261     scsi_esp_cmd(esp, ESP_CMD_TI);
10262     }
10263     }
10264     +
10265     + esp->send_cmd_residual = esp_count;
10266     }
10267    
10268     static int mac_esp_irq_pending(struct esp *esp)
10269     diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
10270     index 9aa9590c5373..f6de7526ded5 100644
10271     --- a/drivers/scsi/megaraid/megaraid_sas_base.c
10272     +++ b/drivers/scsi/megaraid/megaraid_sas_base.c
10273     @@ -7523,6 +7523,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
10274     get_user(user_sense_off, &cioc->sense_off))
10275     return -EFAULT;
10276    
10277     + if (local_sense_off != user_sense_off)
10278     + return -EINVAL;
10279     +
10280     if (local_sense_len) {
10281     void __user **sense_ioc_ptr =
10282     (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
10283     diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
10284     index 2c6c2cd5a0d0..596a9b214df1 100644
10285     --- a/drivers/scsi/qla2xxx/qla_mbx.c
10286     +++ b/drivers/scsi/qla2xxx/qla_mbx.c
10287     @@ -493,7 +493,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
10288     set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
10289     qla2xxx_wake_dpc(vha);
10290     }
10291     - } else if (!abort_active) {
10292     + } else if (current == ha->dpc_thread) {
10293     /* call abort directly since we are in the DPC thread */
10294     ql_dbg(ql_dbg_mbx, vha, 0x101d,
10295     "Timeout, calling abort_isp.\n");
10296     diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
10297     index c55f38ec391c..54074dd483a7 100644
10298     --- a/drivers/scsi/ufs/ufshcd.c
10299     +++ b/drivers/scsi/ufs/ufshcd.c
10300     @@ -1691,8 +1691,9 @@ static void __ufshcd_release(struct ufs_hba *hba)
10301    
10302     hba->clk_gating.state = REQ_CLKS_OFF;
10303     trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
10304     - schedule_delayed_work(&hba->clk_gating.gate_work,
10305     - msecs_to_jiffies(hba->clk_gating.delay_ms));
10306     + queue_delayed_work(hba->clk_gating.clk_gating_workq,
10307     + &hba->clk_gating.gate_work,
10308     + msecs_to_jiffies(hba->clk_gating.delay_ms));
10309     }
10310    
10311     void ufshcd_release(struct ufs_hba *hba)
10312     diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
10313     index 8a3678c2e83c..97bb5989aa21 100644
10314     --- a/drivers/soc/qcom/rmtfs_mem.c
10315     +++ b/drivers/soc/qcom/rmtfs_mem.c
10316     @@ -212,6 +212,11 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
10317     dev_err(&pdev->dev, "failed to parse qcom,vmid\n");
10318     goto remove_cdev;
10319     } else if (!ret) {
10320     + if (!qcom_scm_is_available()) {
10321     + ret = -EPROBE_DEFER;
10322     + goto remove_cdev;
10323     + }
10324     +
10325     perms[0].vmid = QCOM_SCM_VMID_HLOS;
10326     perms[0].perm = QCOM_SCM_PERM_RW;
10327     perms[1].vmid = vmid;
10328     diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
10329     index 2d6f3fcf3211..ed71a4c9c8b2 100644
10330     --- a/drivers/soc/tegra/pmc.c
10331     +++ b/drivers/soc/tegra/pmc.c
10332     @@ -1288,7 +1288,7 @@ static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
10333     if (!pmc->soc->has_tsense_reset)
10334     return;
10335    
10336     - np = of_find_node_by_name(pmc->dev->of_node, "i2c-thermtrip");
10337     + np = of_get_child_by_name(pmc->dev->of_node, "i2c-thermtrip");
10338     if (!np) {
10339     dev_warn(dev, "i2c-thermtrip node not found, %s.\n", disabled);
10340     return;
10341     diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
10342     index 8612525fa4e3..584bcb018a62 100644
10343     --- a/drivers/spi/spi-bcm-qspi.c
10344     +++ b/drivers/spi/spi-bcm-qspi.c
10345     @@ -89,7 +89,7 @@
10346     #define BSPI_BPP_MODE_SELECT_MASK BIT(8)
10347     #define BSPI_BPP_ADDR_SELECT_MASK BIT(16)
10348    
10349     -#define BSPI_READ_LENGTH 512
10350     +#define BSPI_READ_LENGTH 256
10351    
10352     /* MSPI register offsets */
10353     #define MSPI_SPCR0_LSB 0x000
10354     @@ -355,7 +355,7 @@ static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi,
10355     int bpc = 0, bpp = 0;
10356     u8 command = op->cmd.opcode;
10357     int width = op->cmd.buswidth ? op->cmd.buswidth : SPI_NBITS_SINGLE;
10358     - int addrlen = op->addr.nbytes * 8;
10359     + int addrlen = op->addr.nbytes;
10360     int flex_mode = 1;
10361    
10362     dev_dbg(&qspi->pdev->dev, "set flex mode w %x addrlen %x hp %d\n",
10363     diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
10364     index f1526757aaf6..79fc3940245a 100644
10365     --- a/drivers/spi/spi-ep93xx.c
10366     +++ b/drivers/spi/spi-ep93xx.c
10367     @@ -246,6 +246,19 @@ static int ep93xx_spi_read_write(struct spi_master *master)
10368     return -EINPROGRESS;
10369     }
10370    
10371     +static enum dma_transfer_direction
10372     +ep93xx_dma_data_to_trans_dir(enum dma_data_direction dir)
10373     +{
10374     + switch (dir) {
10375     + case DMA_TO_DEVICE:
10376     + return DMA_MEM_TO_DEV;
10377     + case DMA_FROM_DEVICE:
10378     + return DMA_DEV_TO_MEM;
10379     + default:
10380     + return DMA_TRANS_NONE;
10381     + }
10382     +}
10383     +
10384     /**
10385     * ep93xx_spi_dma_prepare() - prepares a DMA transfer
10386     * @master: SPI master
10387     @@ -257,7 +270,7 @@ static int ep93xx_spi_read_write(struct spi_master *master)
10388     */
10389     static struct dma_async_tx_descriptor *
10390     ep93xx_spi_dma_prepare(struct spi_master *master,
10391     - enum dma_transfer_direction dir)
10392     + enum dma_data_direction dir)
10393     {
10394     struct ep93xx_spi *espi = spi_master_get_devdata(master);
10395     struct spi_transfer *xfer = master->cur_msg->state;
10396     @@ -277,9 +290,9 @@ ep93xx_spi_dma_prepare(struct spi_master *master,
10397     buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
10398    
10399     memset(&conf, 0, sizeof(conf));
10400     - conf.direction = dir;
10401     + conf.direction = ep93xx_dma_data_to_trans_dir(dir);
10402    
10403     - if (dir == DMA_DEV_TO_MEM) {
10404     + if (dir == DMA_FROM_DEVICE) {
10405     chan = espi->dma_rx;
10406     buf = xfer->rx_buf;
10407     sgt = &espi->rx_sgt;
10408     @@ -343,7 +356,8 @@ ep93xx_spi_dma_prepare(struct spi_master *master,
10409     if (!nents)
10410     return ERR_PTR(-ENOMEM);
10411    
10412     - txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
10413     + txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, conf.direction,
10414     + DMA_CTRL_ACK);
10415     if (!txd) {
10416     dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
10417     return ERR_PTR(-ENOMEM);
10418     @@ -360,13 +374,13 @@ ep93xx_spi_dma_prepare(struct spi_master *master,
10419     * unmapped.
10420     */
10421     static void ep93xx_spi_dma_finish(struct spi_master *master,
10422     - enum dma_transfer_direction dir)
10423     + enum dma_data_direction dir)
10424     {
10425     struct ep93xx_spi *espi = spi_master_get_devdata(master);
10426     struct dma_chan *chan;
10427     struct sg_table *sgt;
10428    
10429     - if (dir == DMA_DEV_TO_MEM) {
10430     + if (dir == DMA_FROM_DEVICE) {
10431     chan = espi->dma_rx;
10432     sgt = &espi->rx_sgt;
10433     } else {
10434     @@ -381,8 +395,8 @@ static void ep93xx_spi_dma_callback(void *callback_param)
10435     {
10436     struct spi_master *master = callback_param;
10437    
10438     - ep93xx_spi_dma_finish(master, DMA_MEM_TO_DEV);
10439     - ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM);
10440     + ep93xx_spi_dma_finish(master, DMA_TO_DEVICE);
10441     + ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE);
10442    
10443     spi_finalize_current_transfer(master);
10444     }
10445     @@ -392,15 +406,15 @@ static int ep93xx_spi_dma_transfer(struct spi_master *master)
10446     struct ep93xx_spi *espi = spi_master_get_devdata(master);
10447     struct dma_async_tx_descriptor *rxd, *txd;
10448    
10449     - rxd = ep93xx_spi_dma_prepare(master, DMA_DEV_TO_MEM);
10450     + rxd = ep93xx_spi_dma_prepare(master, DMA_FROM_DEVICE);
10451     if (IS_ERR(rxd)) {
10452     dev_err(&master->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
10453     return PTR_ERR(rxd);
10454     }
10455    
10456     - txd = ep93xx_spi_dma_prepare(master, DMA_MEM_TO_DEV);
10457     + txd = ep93xx_spi_dma_prepare(master, DMA_TO_DEVICE);
10458     if (IS_ERR(txd)) {
10459     - ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM);
10460     + ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE);
10461     dev_err(&master->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
10462     return PTR_ERR(txd);
10463     }
10464     diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
10465     index 421bfc7dda67..088772ebef9b 100644
10466     --- a/drivers/spi/spi-gpio.c
10467     +++ b/drivers/spi/spi-gpio.c
10468     @@ -295,9 +295,11 @@ static int spi_gpio_request(struct device *dev,
10469     spi_gpio->miso = devm_gpiod_get_optional(dev, "miso", GPIOD_IN);
10470     if (IS_ERR(spi_gpio->miso))
10471     return PTR_ERR(spi_gpio->miso);
10472     - if (!spi_gpio->miso)
10473     - /* HW configuration without MISO pin */
10474     - *mflags |= SPI_MASTER_NO_RX;
10475     + /*
10476     + * No setting SPI_MASTER_NO_RX here - if there is only a MOSI
10477     + * pin connected the host can still do RX by changing the
10478     + * direction of the line.
10479     + */
10480    
10481     spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
10482     if (IS_ERR(spi_gpio->sck))
10483     @@ -423,7 +425,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
10484     spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
10485     spi_gpio->bitbang.set_line_direction = spi_gpio_set_direction;
10486    
10487     - if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) {
10488     + if ((master_flags & SPI_MASTER_NO_TX) == 0) {
10489     spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
10490     spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
10491     spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
10492     diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
10493     index e43842c7a31a..eb72dba71d83 100644
10494     --- a/drivers/spi/spi-mem.c
10495     +++ b/drivers/spi/spi-mem.c
10496     @@ -346,10 +346,25 @@ EXPORT_SYMBOL_GPL(spi_mem_get_name);
10497     int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
10498     {
10499     struct spi_controller *ctlr = mem->spi->controller;
10500     + size_t len;
10501     +
10502     + len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
10503    
10504     if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
10505     return ctlr->mem_ops->adjust_op_size(mem, op);
10506    
10507     + if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
10508     + if (len > spi_max_transfer_size(mem->spi))
10509     + return -EINVAL;
10510     +
10511     + op->data.nbytes = min3((size_t)op->data.nbytes,
10512     + spi_max_transfer_size(mem->spi),
10513     + spi_max_message_size(mem->spi) -
10514     + len);
10515     + if (!op->data.nbytes)
10516     + return -EINVAL;
10517     + }
10518     +
10519     return 0;
10520     }
10521     EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
10522     diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
10523     index 86c0156e6c88..fc3093d21b96 100644
10524     --- a/drivers/target/target_core_transport.c
10525     +++ b/drivers/target/target_core_transport.c
10526     @@ -2754,7 +2754,7 @@ static void target_release_cmd_kref(struct kref *kref)
10527     if (se_sess) {
10528     spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
10529     list_del_init(&se_cmd->se_cmd_list);
10530     - if (list_empty(&se_sess->sess_cmd_list))
10531     + if (se_sess->sess_tearing_down && list_empty(&se_sess->sess_cmd_list))
10532     wake_up(&se_sess->cmd_list_wq);
10533     spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
10534     }
10535     @@ -2907,7 +2907,7 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
10536    
10537     spin_lock_irq(&se_sess->sess_cmd_lock);
10538     do {
10539     - ret = wait_event_interruptible_lock_irq_timeout(
10540     + ret = wait_event_lock_irq_timeout(
10541     se_sess->cmd_list_wq,
10542     list_empty(&se_sess->sess_cmd_list),
10543     se_sess->sess_cmd_lock, 180 * HZ);
10544     diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c
10545     index 3be9519654e5..cf3fad2cb871 100644
10546     --- a/drivers/tc/tc.c
10547     +++ b/drivers/tc/tc.c
10548     @@ -2,7 +2,7 @@
10549     * TURBOchannel bus services.
10550     *
10551     * Copyright (c) Harald Koerfgen, 1998
10552     - * Copyright (c) 2001, 2003, 2005, 2006 Maciej W. Rozycki
10553     + * Copyright (c) 2001, 2003, 2005, 2006, 2018 Maciej W. Rozycki
10554     * Copyright (c) 2005 James Simmons
10555     *
10556     * This file is subject to the terms and conditions of the GNU
10557     @@ -10,6 +10,7 @@
10558     * directory of this archive for more details.
10559     */
10560     #include <linux/compiler.h>
10561     +#include <linux/dma-mapping.h>
10562     #include <linux/errno.h>
10563     #include <linux/init.h>
10564     #include <linux/ioport.h>
10565     @@ -92,6 +93,11 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
10566     tdev->dev.bus = &tc_bus_type;
10567     tdev->slot = slot;
10568    
10569     + /* TURBOchannel has 34-bit DMA addressing (16GiB space). */
10570     + tdev->dma_mask = DMA_BIT_MASK(34);
10571     + tdev->dev.dma_mask = &tdev->dma_mask;
10572     + tdev->dev.coherent_dma_mask = DMA_BIT_MASK(34);
10573     +
10574     for (i = 0; i < 8; i++) {
10575     tdev->firmware[i] =
10576     readb(module + offset + TC_FIRM_VER + 4 * i);
10577     diff --git a/drivers/thermal/da9062-thermal.c b/drivers/thermal/da9062-thermal.c
10578     index dd8dd947b7f0..01b0cb994457 100644
10579     --- a/drivers/thermal/da9062-thermal.c
10580     +++ b/drivers/thermal/da9062-thermal.c
10581     @@ -106,7 +106,7 @@ static void da9062_thermal_poll_on(struct work_struct *work)
10582     THERMAL_EVENT_UNSPECIFIED);
10583    
10584     delay = msecs_to_jiffies(thermal->zone->passive_delay);
10585     - schedule_delayed_work(&thermal->work, delay);
10586     + queue_delayed_work(system_freezable_wq, &thermal->work, delay);
10587     return;
10588     }
10589    
10590     @@ -125,7 +125,7 @@ static irqreturn_t da9062_thermal_irq_handler(int irq, void *data)
10591     struct da9062_thermal *thermal = data;
10592    
10593     disable_irq_nosync(thermal->irq);
10594     - schedule_delayed_work(&thermal->work, 0);
10595     + queue_delayed_work(system_freezable_wq, &thermal->work, 0);
10596    
10597     return IRQ_HANDLED;
10598     }
10599     diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
10600     index 78f932822d38..8df2ce94c28d 100644
10601     --- a/drivers/thermal/rcar_thermal.c
10602     +++ b/drivers/thermal/rcar_thermal.c
10603     @@ -453,6 +453,7 @@ static int rcar_thermal_remove(struct platform_device *pdev)
10604    
10605     rcar_thermal_for_each_priv(priv, common) {
10606     rcar_thermal_irq_disable(priv);
10607     + cancel_delayed_work_sync(&priv->work);
10608     if (priv->chip->use_of_thermal)
10609     thermal_remove_hwmon_sysfs(priv->zone);
10610     else
10611     diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
10612     index b4ba2b1dab76..f4d0ef695225 100644
10613     --- a/drivers/tty/serial/kgdboc.c
10614     +++ b/drivers/tty/serial/kgdboc.c
10615     @@ -130,6 +130,11 @@ static void kgdboc_unregister_kbd(void)
10616    
10617     static int kgdboc_option_setup(char *opt)
10618     {
10619     + if (!opt) {
10620     + pr_err("kgdboc: config string not provided\n");
10621     + return -EINVAL;
10622     + }
10623     +
10624     if (strlen(opt) >= MAX_CONFIG_LEN) {
10625     printk(KERN_ERR "kgdboc: config string too long\n");
10626     return -ENOSPC;
10627     diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
10628     index 5f1183b0b89d..476ec4b1b86c 100644
10629     --- a/drivers/tty/vt/vt.c
10630     +++ b/drivers/tty/vt/vt.c
10631     @@ -1551,7 +1551,7 @@ static void csi_K(struct vc_data *vc, int vpar)
10632     scr_memsetw(start + offset, vc->vc_video_erase_char, 2 * count);
10633     vc->vc_need_wrap = 0;
10634     if (con_should_update(vc))
10635     - do_update_region(vc, (unsigned long) start, count);
10636     + do_update_region(vc, (unsigned long)(start + offset), count);
10637     }
10638    
10639     static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar positions */
10640     diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
10641     index 70a7981b94b3..9916edda5271 100644
10642     --- a/drivers/uio/uio.c
10643     +++ b/drivers/uio/uio.c
10644     @@ -274,6 +274,8 @@ static struct class uio_class = {
10645     .dev_groups = uio_groups,
10646     };
10647    
10648     +bool uio_class_registered;
10649     +
10650     /*
10651     * device functions
10652     */
10653     @@ -876,6 +878,9 @@ static int init_uio_class(void)
10654     printk(KERN_ERR "class_register failed for uio\n");
10655     goto err_class_register;
10656     }
10657     +
10658     + uio_class_registered = true;
10659     +
10660     return 0;
10661    
10662     err_class_register:
10663     @@ -886,6 +891,7 @@ exit:
10664    
10665     static void release_uio_class(void)
10666     {
10667     + uio_class_registered = false;
10668     class_unregister(&uio_class);
10669     uio_major_cleanup();
10670     }
10671     @@ -912,6 +918,9 @@ int __uio_register_device(struct module *owner,
10672     struct uio_device *idev;
10673     int ret = 0;
10674    
10675     + if (!uio_class_registered)
10676     + return -EPROBE_DEFER;
10677     +
10678     if (!parent || !info || !info->name || !info->version)
10679     return -EINVAL;
10680    
10681     diff --git a/drivers/usb/chipidea/otg.h b/drivers/usb/chipidea/otg.h
10682     index 7e7428e48bfa..4f8b8179ec96 100644
10683     --- a/drivers/usb/chipidea/otg.h
10684     +++ b/drivers/usb/chipidea/otg.h
10685     @@ -17,7 +17,8 @@ void ci_handle_vbus_change(struct ci_hdrc *ci);
10686     static inline void ci_otg_queue_work(struct ci_hdrc *ci)
10687     {
10688     disable_irq_nosync(ci->irq);
10689     - queue_work(ci->wq, &ci->work);
10690     + if (queue_work(ci->wq, &ci->work) == false)
10691     + enable_irq(ci->irq);
10692     }
10693    
10694     #endif /* __DRIVERS_USB_CHIPIDEA_OTG_H */
10695     diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
10696     index 2bd6e6bfc241..260010abf9d8 100644
10697     --- a/drivers/usb/dwc2/hcd.c
10698     +++ b/drivers/usb/dwc2/hcd.c
10699     @@ -4393,6 +4393,7 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd)
10700     struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
10701     struct usb_bus *bus = hcd_to_bus(hcd);
10702     unsigned long flags;
10703     + int ret;
10704    
10705     dev_dbg(hsotg->dev, "DWC OTG HCD START\n");
10706    
10707     @@ -4408,6 +4409,13 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd)
10708    
10709     dwc2_hcd_reinit(hsotg);
10710    
10711     + /* enable external vbus supply before resuming root hub */
10712     + spin_unlock_irqrestore(&hsotg->lock, flags);
10713     + ret = dwc2_vbus_supply_init(hsotg);
10714     + if (ret)
10715     + return ret;
10716     + spin_lock_irqsave(&hsotg->lock, flags);
10717     +
10718     /* Initialize and connect root hub if one is not already attached */
10719     if (bus->root_hub) {
10720     dev_dbg(hsotg->dev, "DWC OTG HCD Has Root Hub\n");
10721     @@ -4417,7 +4425,7 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd)
10722    
10723     spin_unlock_irqrestore(&hsotg->lock, flags);
10724    
10725     - return dwc2_vbus_supply_init(hsotg);
10726     + return 0;
10727     }
10728    
10729     /*
10730     @@ -4482,7 +4490,9 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
10731     hprt0 |= HPRT0_SUSP;
10732     hprt0 &= ~HPRT0_PWR;
10733     dwc2_writel(hsotg, hprt0, HPRT0);
10734     + spin_unlock_irqrestore(&hsotg->lock, flags);
10735     dwc2_vbus_supply_exit(hsotg);
10736     + spin_lock_irqsave(&hsotg->lock, flags);
10737     }
10738    
10739     /* Enter partial_power_down */
10740     diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
10741     index 17147b8c771e..8f267be1745d 100644
10742     --- a/drivers/usb/gadget/udc/atmel_usba_udc.c
10743     +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
10744     @@ -2017,6 +2017,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
10745    
10746     udc->errata = match->data;
10747     udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc");
10748     + if (IS_ERR(udc->pmc))
10749     + udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9rl-pmc");
10750     if (IS_ERR(udc->pmc))
10751     udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9x5-pmc");
10752     if (udc->errata && IS_ERR(udc->pmc))
10753     diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
10754     index e1656f361e08..67d8a501d994 100644
10755     --- a/drivers/usb/gadget/udc/renesas_usb3.c
10756     +++ b/drivers/usb/gadget/udc/renesas_usb3.c
10757     @@ -2437,6 +2437,9 @@ static ssize_t renesas_usb3_b_device_write(struct file *file,
10758     else
10759     usb3->forced_b_device = false;
10760    
10761     + if (usb3->workaround_for_vbus)
10762     + usb3_disconnect(usb3);
10763     +
10764     /* Let this driver call usb3_connect() anyway */
10765     usb3_check_id(usb3);
10766    
10767     diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
10768     index e98673954020..ec6739ef3129 100644
10769     --- a/drivers/usb/host/ohci-at91.c
10770     +++ b/drivers/usb/host/ohci-at91.c
10771     @@ -551,6 +551,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
10772     pdata->overcurrent_pin[i] =
10773     devm_gpiod_get_index_optional(&pdev->dev, "atmel,oc",
10774     i, GPIOD_IN);
10775     + if (!pdata->overcurrent_pin[i])
10776     + continue;
10777     if (IS_ERR(pdata->overcurrent_pin[i])) {
10778     err = PTR_ERR(pdata->overcurrent_pin[i]);
10779     dev_err(&pdev->dev, "unable to claim gpio \"overcurrent\": %d\n", err);
10780     diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
10781     index 7e2a531ba321..12eea73d9f20 100644
10782     --- a/drivers/usb/host/xhci-hub.c
10783     +++ b/drivers/usb/host/xhci-hub.c
10784     @@ -900,6 +900,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
10785     set_bit(wIndex, &bus_state->resuming_ports);
10786     bus_state->resume_done[wIndex] = timeout;
10787     mod_timer(&hcd->rh_timer, timeout);
10788     + usb_hcd_start_port_resume(&hcd->self, wIndex);
10789     }
10790     /* Has resume been signalled for USB_RESUME_TIME yet? */
10791     } else if (time_after_eq(jiffies,
10792     @@ -940,6 +941,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
10793     clear_bit(wIndex, &bus_state->rexit_ports);
10794     }
10795    
10796     + usb_hcd_end_port_resume(&hcd->self, wIndex);
10797     bus_state->port_c_suspend |= 1 << wIndex;
10798     bus_state->suspended_ports &= ~(1 << wIndex);
10799     } else {
10800     @@ -962,6 +964,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
10801     (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) {
10802     bus_state->resume_done[wIndex] = 0;
10803     clear_bit(wIndex, &bus_state->resuming_ports);
10804     + usb_hcd_end_port_resume(&hcd->self, wIndex);
10805     }
10806    
10807    
10808     @@ -1337,6 +1340,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
10809     goto error;
10810    
10811     set_bit(wIndex, &bus_state->resuming_ports);
10812     + usb_hcd_start_port_resume(&hcd->self, wIndex);
10813     xhci_set_link_state(xhci, ports[wIndex],
10814     XDEV_RESUME);
10815     spin_unlock_irqrestore(&xhci->lock, flags);
10816     @@ -1345,6 +1349,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
10817     xhci_set_link_state(xhci, ports[wIndex],
10818     XDEV_U0);
10819     clear_bit(wIndex, &bus_state->resuming_ports);
10820     + usb_hcd_end_port_resume(&hcd->self, wIndex);
10821     }
10822     bus_state->port_c_suspend |= 1 << wIndex;
10823    
10824     diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
10825     index f0a99aa0ac58..cd4659703647 100644
10826     --- a/drivers/usb/host/xhci-ring.c
10827     +++ b/drivers/usb/host/xhci-ring.c
10828     @@ -1602,6 +1602,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
10829     set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
10830     mod_timer(&hcd->rh_timer,
10831     bus_state->resume_done[hcd_portnum]);
10832     + usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
10833     bogus_port_status = true;
10834     }
10835     }
10836     diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
10837     index 4f1f4215f3d6..c74cc9c309b1 100644
10838     --- a/drivers/usb/typec/tcpm.c
10839     +++ b/drivers/usb/typec/tcpm.c
10840     @@ -1430,8 +1430,8 @@ static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
10841     if (pdo_apdo_type(pdo[i]) != APDO_TYPE_PPS)
10842     break;
10843    
10844     - if (pdo_pps_apdo_max_current(pdo[i]) <
10845     - pdo_pps_apdo_max_current(pdo[i - 1]))
10846     + if (pdo_pps_apdo_max_voltage(pdo[i]) <
10847     + pdo_pps_apdo_max_voltage(pdo[i - 1]))
10848     return PDO_ERR_PPS_APDO_NOT_SORTED;
10849     else if (pdo_pps_apdo_min_voltage(pdo[i]) ==
10850     pdo_pps_apdo_min_voltage(pdo[i - 1]) &&
10851     @@ -4116,6 +4116,9 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
10852     goto port_unlock;
10853     }
10854    
10855     + /* Round down operating current to align with PPS valid steps */
10856     + op_curr = op_curr - (op_curr % RDO_PROG_CURR_MA_STEP);
10857     +
10858     reinit_completion(&port->pps_complete);
10859     port->pps_data.op_curr = op_curr;
10860     port->pps_status = 0;
10861     @@ -4169,6 +4172,9 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
10862     goto port_unlock;
10863     }
10864    
10865     + /* Round down output voltage to align with PPS valid steps */
10866     + out_volt = out_volt - (out_volt % RDO_PROG_VOLT_MV_STEP);
10867     +
10868     reinit_completion(&port->pps_complete);
10869     port->pps_data.out_volt = out_volt;
10870     port->pps_status = 0;
10871     diff --git a/drivers/usb/usbip/vudc_main.c b/drivers/usb/usbip/vudc_main.c
10872     index 3fc22037a82f..390733e6937e 100644
10873     --- a/drivers/usb/usbip/vudc_main.c
10874     +++ b/drivers/usb/usbip/vudc_main.c
10875     @@ -73,6 +73,10 @@ static int __init init(void)
10876     cleanup:
10877     list_for_each_entry_safe(udc_dev, udc_dev2, &vudc_devices, dev_entry) {
10878     list_del(&udc_dev->dev_entry);
10879     + /*
10880     + * Just do platform_device_del() here, put_vudc_device()
10881     + * calls the platform_device_put()
10882     + */
10883     platform_device_del(udc_dev->pdev);
10884     put_vudc_device(udc_dev);
10885     }
10886     @@ -89,7 +93,11 @@ static void __exit cleanup(void)
10887    
10888     list_for_each_entry_safe(udc_dev, udc_dev2, &vudc_devices, dev_entry) {
10889     list_del(&udc_dev->dev_entry);
10890     - platform_device_unregister(udc_dev->pdev);
10891     + /*
10892     + * Just do platform_device_del() here, put_vudc_device()
10893     + * calls the platform_device_put()
10894     + */
10895     + platform_device_del(udc_dev->pdev);
10896     put_vudc_device(udc_dev);
10897     }
10898     platform_driver_unregister(&vudc_driver);
10899     diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
10900     index 38716eb50408..8a3e8f61b991 100644
10901     --- a/drivers/video/hdmi.c
10902     +++ b/drivers/video/hdmi.c
10903     @@ -592,10 +592,10 @@ hdmi_extended_colorimetry_get_name(enum hdmi_extended_colorimetry ext_col)
10904     return "xvYCC 709";
10905     case HDMI_EXTENDED_COLORIMETRY_S_YCC_601:
10906     return "sYCC 601";
10907     - case HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601:
10908     - return "Adobe YCC 601";
10909     - case HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB:
10910     - return "Adobe RGB";
10911     + case HDMI_EXTENDED_COLORIMETRY_OPYCC_601:
10912     + return "opYCC 601";
10913     + case HDMI_EXTENDED_COLORIMETRY_OPRGB:
10914     + return "opRGB";
10915     case HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM:
10916     return "BT.2020 Constant Luminance";
10917     case HDMI_EXTENDED_COLORIMETRY_BT2020:
10918     diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
10919     index 83fc9aab34e8..3099052e1243 100644
10920     --- a/drivers/w1/masters/omap_hdq.c
10921     +++ b/drivers/w1/masters/omap_hdq.c
10922     @@ -763,6 +763,8 @@ static int omap_hdq_remove(struct platform_device *pdev)
10923     /* remove module dependency */
10924     pm_runtime_disable(&pdev->dev);
10925    
10926     + w1_remove_master_device(&omap_w1_master);
10927     +
10928     return 0;
10929     }
10930    
10931     diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
10932     index df1ed37c3269..de01a6d0059d 100644
10933     --- a/drivers/xen/privcmd-buf.c
10934     +++ b/drivers/xen/privcmd-buf.c
10935     @@ -21,15 +21,9 @@
10936    
10937     MODULE_LICENSE("GPL");
10938    
10939     -static unsigned int limit = 64;
10940     -module_param(limit, uint, 0644);
10941     -MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by "
10942     - "the privcmd-buf device per open file");
10943     -
10944     struct privcmd_buf_private {
10945     struct mutex lock;
10946     struct list_head list;
10947     - unsigned int allocated;
10948     };
10949    
10950     struct privcmd_buf_vma_private {
10951     @@ -60,13 +54,10 @@ static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
10952     {
10953     unsigned int i;
10954    
10955     - vma_priv->file_priv->allocated -= vma_priv->n_pages;
10956     -
10957     list_del(&vma_priv->list);
10958    
10959     for (i = 0; i < vma_priv->n_pages; i++)
10960     - if (vma_priv->pages[i])
10961     - __free_page(vma_priv->pages[i]);
10962     + __free_page(vma_priv->pages[i]);
10963    
10964     kfree(vma_priv);
10965     }
10966     @@ -146,8 +137,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
10967     unsigned int i;
10968     int ret = 0;
10969    
10970     - if (!(vma->vm_flags & VM_SHARED) || count > limit ||
10971     - file_priv->allocated + count > limit)
10972     + if (!(vma->vm_flags & VM_SHARED))
10973     return -EINVAL;
10974    
10975     vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
10976     @@ -155,19 +145,15 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
10977     if (!vma_priv)
10978     return -ENOMEM;
10979    
10980     - vma_priv->n_pages = count;
10981     - count = 0;
10982     - for (i = 0; i < vma_priv->n_pages; i++) {
10983     + for (i = 0; i < count; i++) {
10984     vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
10985     if (!vma_priv->pages[i])
10986     break;
10987     - count++;
10988     + vma_priv->n_pages++;
10989     }
10990    
10991     mutex_lock(&file_priv->lock);
10992    
10993     - file_priv->allocated += count;
10994     -
10995     vma_priv->file_priv = file_priv;
10996     vma_priv->users = 1;
10997    
10998     diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
10999     index a6f9ba85dc4b..aa081f806728 100644
11000     --- a/drivers/xen/swiotlb-xen.c
11001     +++ b/drivers/xen/swiotlb-xen.c
11002     @@ -303,6 +303,9 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
11003     */
11004     flags &= ~(__GFP_DMA | __GFP_HIGHMEM);
11005    
11006     + /* Convert the size to actually allocated. */
11007     + size = 1UL << (order + XEN_PAGE_SHIFT);
11008     +
11009     /* On ARM this function returns an ioremap'ped virtual address for
11010     * which virt_to_phys doesn't return the corresponding physical
11011     * address. In fact on ARM virt_to_phys only works for kernel direct
11012     @@ -351,6 +354,9 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
11013     * physical address */
11014     phys = xen_bus_to_phys(dev_addr);
11015    
11016     + /* Convert the size to actually allocated. */
11017     + size = 1UL << (order + XEN_PAGE_SHIFT);
11018     +
11019     if (((dev_addr + size - 1 <= dma_mask)) ||
11020     range_straddles_page_boundary(phys, size))
11021     xen_destroy_contiguous_region(phys, order);
11022     diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
11023     index 63c1494a8d73..2acbfe104e46 100644
11024     --- a/drivers/xen/xen-balloon.c
11025     +++ b/drivers/xen/xen-balloon.c
11026     @@ -76,12 +76,15 @@ static void watch_target(struct xenbus_watch *watch,
11027    
11028     if (!watch_fired) {
11029     watch_fired = true;
11030     - err = xenbus_scanf(XBT_NIL, "memory", "static-max", "%llu",
11031     - &static_max);
11032     - if (err != 1)
11033     - static_max = new_target;
11034     - else
11035     +
11036     + if ((xenbus_scanf(XBT_NIL, "memory", "static-max",
11037     + "%llu", &static_max) == 1) ||
11038     + (xenbus_scanf(XBT_NIL, "memory", "memory_static_max",
11039     + "%llu", &static_max) == 1))
11040     static_max >>= PAGE_SHIFT - 10;
11041     + else
11042     + static_max = new_target;
11043     +
11044     target_diff = (xen_pv_domain() || xen_initial_domain()) ? 0
11045     : static_max - balloon_stats.target_pages;
11046     }
11047     diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
11048     index d436fb4c002e..089b46c4d97f 100644
11049     --- a/fs/btrfs/ctree.c
11050     +++ b/fs/btrfs/ctree.c
11051     @@ -1050,9 +1050,26 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
11052     if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
11053     parent_start = parent->start;
11054    
11055     + /*
11056     + * If we are COWing a node/leaf from the extent, chunk or device trees,
11057     + * make sure that we do not finish block group creation of pending block
11058     + * groups. We do this to avoid a deadlock.
11059     + * COWing can result in allocation of a new chunk, and flushing pending
11060     + * block groups (btrfs_create_pending_block_groups()) can be triggered
11061     + * when finishing allocation of a new chunk. Creation of a pending block
11062     + * group modifies the extent, chunk and device trees, therefore we could
11063     + * deadlock with ourselves since we are holding a lock on an extent
11064     + * buffer that btrfs_create_pending_block_groups() may try to COW later.
11065     + */
11066     + if (root == fs_info->extent_root ||
11067     + root == fs_info->chunk_root ||
11068     + root == fs_info->dev_root)
11069     + trans->can_flush_pending_bgs = false;
11070     +
11071     cow = btrfs_alloc_tree_block(trans, root, parent_start,
11072     root->root_key.objectid, &disk_key, level,
11073     search_start, empty_size);
11074     + trans->can_flush_pending_bgs = true;
11075     if (IS_ERR(cow))
11076     return PTR_ERR(cow);
11077    
11078     diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
11079     index dec01970d8c5..981434764bb9 100644
11080     --- a/fs/btrfs/dev-replace.c
11081     +++ b/fs/btrfs/dev-replace.c
11082     @@ -440,6 +440,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
11083     break;
11084     case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
11085     case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
11086     + ASSERT(0);
11087     ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED;
11088     goto leave;
11089     }
11090     @@ -482,6 +483,10 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
11091     if (IS_ERR(trans)) {
11092     ret = PTR_ERR(trans);
11093     btrfs_dev_replace_write_lock(dev_replace);
11094     + dev_replace->replace_state =
11095     + BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
11096     + dev_replace->srcdev = NULL;
11097     + dev_replace->tgtdev = NULL;
11098     goto leave;
11099     }
11100    
11101     @@ -503,8 +508,6 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
11102     return ret;
11103    
11104     leave:
11105     - dev_replace->srcdev = NULL;
11106     - dev_replace->tgtdev = NULL;
11107     btrfs_dev_replace_write_unlock(dev_replace);
11108     btrfs_destroy_dev_replace_tgtdev(tgt_device);
11109     return ret;
11110     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
11111     index 2d9074295d7f..51e41e53d4ae 100644
11112     --- a/fs/btrfs/extent-tree.c
11113     +++ b/fs/btrfs/extent-tree.c
11114     @@ -2366,6 +2366,9 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
11115     insert_reserved);
11116     else
11117     BUG();
11118     + if (ret && insert_reserved)
11119     + btrfs_pin_extent(trans->fs_info, node->bytenr,
11120     + node->num_bytes, 1);
11121     return ret;
11122     }
11123    
11124     @@ -2911,7 +2914,6 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
11125     struct btrfs_delayed_ref_head *head;
11126     int ret;
11127     int run_all = count == (unsigned long)-1;
11128     - bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
11129    
11130     /* We'll clean this up in btrfs_cleanup_transaction */
11131     if (trans->aborted)
11132     @@ -2928,7 +2930,6 @@ again:
11133     #ifdef SCRAMBLE_DELAYED_REFS
11134     delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
11135     #endif
11136     - trans->can_flush_pending_bgs = false;
11137     ret = __btrfs_run_delayed_refs(trans, count);
11138     if (ret < 0) {
11139     btrfs_abort_transaction(trans, ret);
11140     @@ -2959,7 +2960,6 @@ again:
11141     goto again;
11142     }
11143     out:
11144     - trans->can_flush_pending_bgs = can_flush_pending_bgs;
11145     return 0;
11146     }
11147    
11148     @@ -4533,6 +4533,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
11149     goto out;
11150     } else {
11151     ret = 1;
11152     + space_info->max_extent_size = 0;
11153     }
11154    
11155     space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
11156     @@ -4554,11 +4555,9 @@ out:
11157     * the block groups that were made dirty during the lifetime of the
11158     * transaction.
11159     */
11160     - if (trans->can_flush_pending_bgs &&
11161     - trans->chunk_bytes_reserved >= (u64)SZ_2M) {
11162     + if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
11163     btrfs_create_pending_block_groups(trans);
11164     - btrfs_trans_release_chunk_metadata(trans);
11165     - }
11166     +
11167     return ret;
11168     }
11169    
11170     @@ -6436,6 +6435,7 @@ static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
11171     space_info->bytes_readonly += num_bytes;
11172     cache->reserved -= num_bytes;
11173     space_info->bytes_reserved -= num_bytes;
11174     + space_info->max_extent_size = 0;
11175    
11176     if (delalloc)
11177     cache->delalloc_bytes -= num_bytes;
11178     @@ -7233,6 +7233,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
11179     struct btrfs_block_group_cache *block_group = NULL;
11180     u64 search_start = 0;
11181     u64 max_extent_size = 0;
11182     + u64 max_free_space = 0;
11183     u64 empty_cluster = 0;
11184     struct btrfs_space_info *space_info;
11185     int loop = 0;
11186     @@ -7528,8 +7529,8 @@ unclustered_alloc:
11187     spin_lock(&ctl->tree_lock);
11188     if (ctl->free_space <
11189     num_bytes + empty_cluster + empty_size) {
11190     - if (ctl->free_space > max_extent_size)
11191     - max_extent_size = ctl->free_space;
11192     + max_free_space = max(max_free_space,
11193     + ctl->free_space);
11194     spin_unlock(&ctl->tree_lock);
11195     goto loop;
11196     }
11197     @@ -7696,6 +7697,8 @@ loop:
11198     }
11199     out:
11200     if (ret == -ENOSPC) {
11201     + if (!max_extent_size)
11202     + max_extent_size = max_free_space;
11203     spin_lock(&space_info->lock);
11204     space_info->max_extent_size = max_extent_size;
11205     spin_unlock(&space_info->lock);
11206     @@ -7977,21 +7980,14 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
11207     }
11208    
11209     path = btrfs_alloc_path();
11210     - if (!path) {
11211     - btrfs_free_and_pin_reserved_extent(fs_info,
11212     - extent_key.objectid,
11213     - fs_info->nodesize);
11214     + if (!path)
11215     return -ENOMEM;
11216     - }
11217    
11218     path->leave_spinning = 1;
11219     ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
11220     &extent_key, size);
11221     if (ret) {
11222     btrfs_free_path(path);
11223     - btrfs_free_and_pin_reserved_extent(fs_info,
11224     - extent_key.objectid,
11225     - fs_info->nodesize);
11226     return ret;
11227     }
11228    
11229     @@ -8119,6 +8115,19 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
11230     if (IS_ERR(buf))
11231     return buf;
11232    
11233     + /*
11234     + * Extra safety check in case the extent tree is corrupted and extent
11235     + * allocator chooses to use a tree block which is already used and
11236     + * locked.
11237     + */
11238     + if (buf->lock_owner == current->pid) {
11239     + btrfs_err_rl(fs_info,
11240     +"tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
11241     + buf->start, btrfs_header_owner(buf), current->pid);
11242     + free_extent_buffer(buf);
11243     + return ERR_PTR(-EUCLEAN);
11244     + }
11245     +
11246     btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
11247     btrfs_tree_lock(buf);
11248     clean_tree_block(fs_info, buf);
11249     @@ -8763,15 +8772,14 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
11250     if (eb == root->node) {
11251     if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
11252     parent = eb->start;
11253     - else
11254     - BUG_ON(root->root_key.objectid !=
11255     - btrfs_header_owner(eb));
11256     + else if (root->root_key.objectid != btrfs_header_owner(eb))
11257     + goto owner_mismatch;
11258     } else {
11259     if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
11260     parent = path->nodes[level + 1]->start;
11261     - else
11262     - BUG_ON(root->root_key.objectid !=
11263     - btrfs_header_owner(path->nodes[level + 1]));
11264     + else if (root->root_key.objectid !=
11265     + btrfs_header_owner(path->nodes[level + 1]))
11266     + goto owner_mismatch;
11267     }
11268    
11269     btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
11270     @@ -8779,6 +8787,11 @@ out:
11271     wc->refs[level] = 0;
11272     wc->flags[level] = 0;
11273     return 0;
11274     +
11275     +owner_mismatch:
11276     + btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu",
11277     + btrfs_header_owner(eb), root->root_key.objectid);
11278     + return -EUCLEAN;
11279     }
11280    
11281     static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
11282     @@ -8832,6 +8845,8 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
11283     ret = walk_up_proc(trans, root, path, wc);
11284     if (ret > 0)
11285     return 0;
11286     + if (ret < 0)
11287     + return ret;
11288    
11289     if (path->locks[level]) {
11290     btrfs_tree_unlock_rw(path->nodes[level],
11291     @@ -9613,6 +9628,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
11292    
11293     block_group = btrfs_lookup_first_block_group(info, last);
11294     while (block_group) {
11295     + wait_block_group_cache_done(block_group);
11296     spin_lock(&block_group->lock);
11297     if (block_group->iref)
11298     break;
11299     @@ -10074,15 +10090,19 @@ error:
11300     void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
11301     {
11302     struct btrfs_fs_info *fs_info = trans->fs_info;
11303     - struct btrfs_block_group_cache *block_group, *tmp;
11304     + struct btrfs_block_group_cache *block_group;
11305     struct btrfs_root *extent_root = fs_info->extent_root;
11306     struct btrfs_block_group_item item;
11307     struct btrfs_key key;
11308     int ret = 0;
11309     - bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
11310    
11311     - trans->can_flush_pending_bgs = false;
11312     - list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
11313     + if (!trans->can_flush_pending_bgs)
11314     + return;
11315     +
11316     + while (!list_empty(&trans->new_bgs)) {
11317     + block_group = list_first_entry(&trans->new_bgs,
11318     + struct btrfs_block_group_cache,
11319     + bg_list);
11320     if (ret)
11321     goto next;
11322    
11323     @@ -10103,7 +10123,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
11324     next:
11325     list_del_init(&block_group->bg_list);
11326     }
11327     - trans->can_flush_pending_bgs = can_flush_pending_bgs;
11328     + btrfs_trans_release_chunk_metadata(trans);
11329     }
11330    
11331     int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
11332     @@ -10753,14 +10773,16 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
11333     * We don't want a transaction for this since the discard may take a
11334     * substantial amount of time. We don't require that a transaction be
11335     * running, but we do need to take a running transaction into account
11336     - * to ensure that we're not discarding chunks that were released in
11337     - * the current transaction.
11338     + * to ensure that we're not discarding chunks that were released or
11339     + * allocated in the current transaction.
11340     *
11341     * Holding the chunks lock will prevent other threads from allocating
11342     * or releasing chunks, but it won't prevent a running transaction
11343     * from committing and releasing the memory that the pending chunks
11344     * list head uses. For that, we need to take a reference to the
11345     - * transaction.
11346     + * transaction and hold the commit root sem. We only need to hold
11347     + * it while performing the free space search since we have already
11348     + * held back allocations.
11349     */
11350     static int btrfs_trim_free_extents(struct btrfs_device *device,
11351     u64 minlen, u64 *trimmed)
11352     @@ -10770,6 +10792,10 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
11353    
11354     *trimmed = 0;
11355    
11356     + /* Discard not supported = nothing to do. */
11357     + if (!blk_queue_discard(bdev_get_queue(device->bdev)))
11358     + return 0;
11359     +
11360     /* Not writeable = nothing to do. */
11361     if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
11362     return 0;
11363     @@ -10787,9 +10813,13 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
11364    
11365     ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
11366     if (ret)
11367     - return ret;
11368     + break;
11369    
11370     - down_read(&fs_info->commit_root_sem);
11371     + ret = down_read_killable(&fs_info->commit_root_sem);
11372     + if (ret) {
11373     + mutex_unlock(&fs_info->chunk_mutex);
11374     + break;
11375     + }
11376    
11377     spin_lock(&fs_info->trans_lock);
11378     trans = fs_info->running_transaction;
11379     @@ -10797,13 +10827,17 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
11380     refcount_inc(&trans->use_count);
11381     spin_unlock(&fs_info->trans_lock);
11382    
11383     + if (!trans)
11384     + up_read(&fs_info->commit_root_sem);
11385     +
11386     ret = find_free_dev_extent_start(trans, device, minlen, start,
11387     &start, &len);
11388     - if (trans)
11389     + if (trans) {
11390     + up_read(&fs_info->commit_root_sem);
11391     btrfs_put_transaction(trans);
11392     + }
11393    
11394     if (ret) {
11395     - up_read(&fs_info->commit_root_sem);
11396     mutex_unlock(&fs_info->chunk_mutex);
11397     if (ret == -ENOSPC)
11398     ret = 0;
11399     @@ -10811,7 +10845,6 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
11400     }
11401    
11402     ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
11403     - up_read(&fs_info->commit_root_sem);
11404     mutex_unlock(&fs_info->chunk_mutex);
11405    
11406     if (ret)
11407     @@ -10831,6 +10864,15 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
11408     return ret;
11409     }
11410    
11411     +/*
11412     + * Trim the whole filesystem by:
11413     + * 1) trimming the free space in each block group
11414     + * 2) trimming the unallocated space on each device
11415     + *
11416     + * This will also continue trimming even if a block group or device encounters
11417     + * an error. The return value will be the last error, or 0 if nothing bad
11418     + * happens.
11419     + */
11420     int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
11421     {
11422     struct btrfs_block_group_cache *cache = NULL;
11423     @@ -10840,18 +10882,14 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
11424     u64 start;
11425     u64 end;
11426     u64 trimmed = 0;
11427     - u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
11428     + u64 bg_failed = 0;
11429     + u64 dev_failed = 0;
11430     + int bg_ret = 0;
11431     + int dev_ret = 0;
11432     int ret = 0;
11433    
11434     - /*
11435     - * try to trim all FS space, our block group may start from non-zero.
11436     - */
11437     - if (range->len == total_bytes)
11438     - cache = btrfs_lookup_first_block_group(fs_info, range->start);
11439     - else
11440     - cache = btrfs_lookup_block_group(fs_info, range->start);
11441     -
11442     - while (cache) {
11443     + cache = btrfs_lookup_first_block_group(fs_info, range->start);
11444     + for (; cache; cache = next_block_group(fs_info, cache)) {
11445     if (cache->key.objectid >= (range->start + range->len)) {
11446     btrfs_put_block_group(cache);
11447     break;
11448     @@ -10865,13 +10903,15 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
11449     if (!block_group_cache_done(cache)) {
11450     ret = cache_block_group(cache, 0);
11451     if (ret) {
11452     - btrfs_put_block_group(cache);
11453     - break;
11454     + bg_failed++;
11455     + bg_ret = ret;
11456     + continue;
11457     }
11458     ret = wait_block_group_cache_done(cache);
11459     if (ret) {
11460     - btrfs_put_block_group(cache);
11461     - break;
11462     + bg_failed++;
11463     + bg_ret = ret;
11464     + continue;
11465     }
11466     }
11467     ret = btrfs_trim_block_group(cache,
11468     @@ -10882,28 +10922,40 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
11469    
11470     trimmed += group_trimmed;
11471     if (ret) {
11472     - btrfs_put_block_group(cache);
11473     - break;
11474     + bg_failed++;
11475     + bg_ret = ret;
11476     + continue;
11477     }
11478     }
11479     -
11480     - cache = next_block_group(fs_info, cache);
11481     }
11482    
11483     + if (bg_failed)
11484     + btrfs_warn(fs_info,
11485     + "failed to trim %llu block group(s), last error %d",
11486     + bg_failed, bg_ret);
11487     mutex_lock(&fs_info->fs_devices->device_list_mutex);
11488     - devices = &fs_info->fs_devices->alloc_list;
11489     - list_for_each_entry(device, devices, dev_alloc_list) {
11490     + devices = &fs_info->fs_devices->devices;
11491     + list_for_each_entry(device, devices, dev_list) {
11492     ret = btrfs_trim_free_extents(device, range->minlen,
11493     &group_trimmed);
11494     - if (ret)
11495     + if (ret) {
11496     + dev_failed++;
11497     + dev_ret = ret;
11498     break;
11499     + }
11500    
11501     trimmed += group_trimmed;
11502     }
11503     mutex_unlock(&fs_info->fs_devices->device_list_mutex);
11504    
11505     + if (dev_failed)
11506     + btrfs_warn(fs_info,
11507     + "failed to trim %llu device(s), last error %d",
11508     + dev_failed, dev_ret);
11509     range->len = trimmed;
11510     - return ret;
11511     + if (bg_ret)
11512     + return bg_ret;
11513     + return dev_ret;
11514     }
11515    
11516     /*
11517     diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
11518     index 2be00e873e92..7d81cc415264 100644
11519     --- a/fs/btrfs/file.c
11520     +++ b/fs/btrfs/file.c
11521     @@ -531,6 +531,14 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages,
11522    
11523     end_of_last_block = start_pos + num_bytes - 1;
11524    
11525     + /*
11526     + * The pages may have already been dirty, clear out old accounting so
11527     + * we can set things up properly
11528     + */
11529     + clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, end_of_last_block,
11530     + EXTENT_DIRTY | EXTENT_DELALLOC |
11531     + EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, cached);
11532     +
11533     if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
11534     if (start_pos >= isize &&
11535     !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
11536     @@ -1500,18 +1508,27 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
11537     }
11538     if (ordered)
11539     btrfs_put_ordered_extent(ordered);
11540     - clear_extent_bit(&inode->io_tree, start_pos, last_pos,
11541     - EXTENT_DIRTY | EXTENT_DELALLOC |
11542     - EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
11543     - 0, 0, cached_state);
11544     +
11545     *lockstart = start_pos;
11546     *lockend = last_pos;
11547     ret = 1;
11548     }
11549    
11550     + /*
11551     + * It's possible the pages are dirty right now, but we don't want
11552     + * to clean them yet because copy_from_user may catch a page fault
11553     + * and we might have to fall back to one page at a time. If that
11554     + * happens, we'll unlock these pages and we'd have a window where
11555     + * reclaim could sneak in and drop the once-dirty page on the floor
11556     + * without writing it.
11557     + *
11558     + * We have the pages locked and the extent range locked, so there's
11559     + * no way someone can start IO on any dirty pages in this range.
11560     + *
11561     + * We'll call btrfs_dirty_pages() later on, and that will flip around
11562     + * delalloc bits and dirty the pages as required.
11563     + */
11564     for (i = 0; i < num_pages; i++) {
11565     - if (clear_page_dirty_for_io(pages[i]))
11566     - account_page_redirty(pages[i]);
11567     set_page_extent_mapped(pages[i]);
11568     WARN_ON(!PageLocked(pages[i]));
11569     }
11570     @@ -2061,6 +2078,14 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
11571     goto out;
11572    
11573     inode_lock(inode);
11574     +
11575     + /*
11576     + * We take the dio_sem here because the tree log stuff can race with
11577     + * lockless dio writes and get an extent map logged for an extent we
11578     + * never waited on. We need it this high up for lockdep reasons.
11579     + */
11580     + down_write(&BTRFS_I(inode)->dio_sem);
11581     +
11582     atomic_inc(&root->log_batch);
11583    
11584     /*
11585     @@ -2069,6 +2094,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
11586     */
11587     ret = btrfs_wait_ordered_range(inode, start, len);
11588     if (ret) {
11589     + up_write(&BTRFS_I(inode)->dio_sem);
11590     inode_unlock(inode);
11591     goto out;
11592     }
11593     @@ -2092,6 +2118,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
11594     * checked called fsync.
11595     */
11596     ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
11597     + up_write(&BTRFS_I(inode)->dio_sem);
11598     inode_unlock(inode);
11599     goto out;
11600     }
11601     @@ -2110,6 +2137,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
11602     trans = btrfs_start_transaction(root, 0);
11603     if (IS_ERR(trans)) {
11604     ret = PTR_ERR(trans);
11605     + up_write(&BTRFS_I(inode)->dio_sem);
11606     inode_unlock(inode);
11607     goto out;
11608     }
11609     @@ -2131,6 +2159,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
11610     * file again, but that will end up using the synchronization
11611     * inside btrfs_sync_log to keep things safe.
11612     */
11613     + up_write(&BTRFS_I(inode)->dio_sem);
11614     inode_unlock(inode);
11615    
11616     /*
11617     diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
11618     index 0adf38b00fa0..8ecf8c0e5fe6 100644
11619     --- a/fs/btrfs/free-space-cache.c
11620     +++ b/fs/btrfs/free-space-cache.c
11621     @@ -10,6 +10,7 @@
11622     #include <linux/math64.h>
11623     #include <linux/ratelimit.h>
11624     #include <linux/error-injection.h>
11625     +#include <linux/sched/mm.h>
11626     #include "ctree.h"
11627     #include "free-space-cache.h"
11628     #include "transaction.h"
11629     @@ -47,6 +48,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
11630     struct btrfs_free_space_header *header;
11631     struct extent_buffer *leaf;
11632     struct inode *inode = NULL;
11633     + unsigned nofs_flag;
11634     int ret;
11635    
11636     key.objectid = BTRFS_FREE_SPACE_OBJECTID;
11637     @@ -68,7 +70,13 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
11638     btrfs_disk_key_to_cpu(&location, &disk_key);
11639     btrfs_release_path(path);
11640    
11641     + /*
11642     + * We are often under a trans handle at this point, so we need to make
11643     + * sure NOFS is set to keep us from deadlocking.
11644     + */
11645     + nofs_flag = memalloc_nofs_save();
11646     inode = btrfs_iget(fs_info->sb, &location, root, NULL);
11647     + memalloc_nofs_restore(nofs_flag);
11648     if (IS_ERR(inode))
11649     return inode;
11650    
11651     @@ -1679,6 +1687,8 @@ static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
11652     bitmap_clear(info->bitmap, start, count);
11653    
11654     info->bytes -= bytes;
11655     + if (info->max_extent_size > ctl->unit)
11656     + info->max_extent_size = 0;
11657     }
11658    
11659     static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
11660     @@ -1762,6 +1772,13 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
11661     return -1;
11662     }
11663    
11664     +static inline u64 get_max_extent_size(struct btrfs_free_space *entry)
11665     +{
11666     + if (entry->bitmap)
11667     + return entry->max_extent_size;
11668     + return entry->bytes;
11669     +}
11670     +
11671     /* Cache the size of the max extent in bytes */
11672     static struct btrfs_free_space *
11673     find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
11674     @@ -1783,8 +1800,8 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
11675     for (node = &entry->offset_index; node; node = rb_next(node)) {
11676     entry = rb_entry(node, struct btrfs_free_space, offset_index);
11677     if (entry->bytes < *bytes) {
11678     - if (entry->bytes > *max_extent_size)
11679     - *max_extent_size = entry->bytes;
11680     + *max_extent_size = max(get_max_extent_size(entry),
11681     + *max_extent_size);
11682     continue;
11683     }
11684    
11685     @@ -1802,8 +1819,8 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
11686     }
11687    
11688     if (entry->bytes < *bytes + align_off) {
11689     - if (entry->bytes > *max_extent_size)
11690     - *max_extent_size = entry->bytes;
11691     + *max_extent_size = max(get_max_extent_size(entry),
11692     + *max_extent_size);
11693     continue;
11694     }
11695    
11696     @@ -1815,8 +1832,10 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
11697     *offset = tmp;
11698     *bytes = size;
11699     return entry;
11700     - } else if (size > *max_extent_size) {
11701     - *max_extent_size = size;
11702     + } else {
11703     + *max_extent_size =
11704     + max(get_max_extent_size(entry),
11705     + *max_extent_size);
11706     }
11707     continue;
11708     }
11709     @@ -2440,6 +2459,7 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
11710     struct rb_node *n;
11711     int count = 0;
11712    
11713     + spin_lock(&ctl->tree_lock);
11714     for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
11715     info = rb_entry(n, struct btrfs_free_space, offset_index);
11716     if (info->bytes >= bytes && !block_group->ro)
11717     @@ -2448,6 +2468,7 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
11718     info->offset, info->bytes,
11719     (info->bitmap) ? "yes" : "no");
11720     }
11721     + spin_unlock(&ctl->tree_lock);
11722     btrfs_info(fs_info, "block group has cluster?: %s",
11723     list_empty(&block_group->cluster_list) ? "no" : "yes");
11724     btrfs_info(fs_info,
11725     @@ -2676,8 +2697,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
11726    
11727     err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
11728     if (err) {
11729     - if (search_bytes > *max_extent_size)
11730     - *max_extent_size = search_bytes;
11731     + *max_extent_size = max(get_max_extent_size(entry),
11732     + *max_extent_size);
11733     return 0;
11734     }
11735    
11736     @@ -2714,8 +2735,9 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
11737    
11738     entry = rb_entry(node, struct btrfs_free_space, offset_index);
11739     while (1) {
11740     - if (entry->bytes < bytes && entry->bytes > *max_extent_size)
11741     - *max_extent_size = entry->bytes;
11742     + if (entry->bytes < bytes)
11743     + *max_extent_size = max(get_max_extent_size(entry),
11744     + *max_extent_size);
11745    
11746     if (entry->bytes < bytes ||
11747     (!entry->bitmap && entry->offset < min_start)) {
11748     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
11749     index 3ea5339603cf..83268d8f48c4 100644
11750     --- a/fs/btrfs/inode.c
11751     +++ b/fs/btrfs/inode.c
11752     @@ -503,6 +503,7 @@ again:
11753     pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
11754     if (!pages) {
11755     /* just bail out to the uncompressed code */
11756     + nr_pages = 0;
11757     goto cont;
11758     }
11759    
11760     @@ -2944,6 +2945,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
11761     bool truncated = false;
11762     bool range_locked = false;
11763     bool clear_new_delalloc_bytes = false;
11764     + bool clear_reserved_extent = true;
11765    
11766     if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
11767     !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
11768     @@ -3047,10 +3049,12 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
11769     logical_len, logical_len,
11770     compress_type, 0, 0,
11771     BTRFS_FILE_EXTENT_REG);
11772     - if (!ret)
11773     + if (!ret) {
11774     + clear_reserved_extent = false;
11775     btrfs_release_delalloc_bytes(fs_info,
11776     ordered_extent->start,
11777     ordered_extent->disk_len);
11778     + }
11779     }
11780     unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
11781     ordered_extent->file_offset, ordered_extent->len,
11782     @@ -3111,8 +3115,13 @@ out:
11783     * wrong we need to return the space for this ordered extent
11784     * back to the allocator. We only free the extent in the
11785     * truncated case if we didn't write out the extent at all.
11786     + *
11787     + * If we made it past insert_reserved_file_extent before we
11788     + * errored out then we don't need to do this as the accounting
11789     + * has already been done.
11790     */
11791     if ((ret || !logical_len) &&
11792     + clear_reserved_extent &&
11793     !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
11794     !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
11795     btrfs_free_reserved_extent(fs_info,
11796     @@ -5274,11 +5283,13 @@ static void evict_inode_truncate_pages(struct inode *inode)
11797     struct extent_state *cached_state = NULL;
11798     u64 start;
11799     u64 end;
11800     + unsigned state_flags;
11801    
11802     node = rb_first(&io_tree->state);
11803     state = rb_entry(node, struct extent_state, rb_node);
11804     start = state->start;
11805     end = state->end;
11806     + state_flags = state->state;
11807     spin_unlock(&io_tree->lock);
11808    
11809     lock_extent_bits(io_tree, start, end, &cached_state);
11810     @@ -5291,7 +5302,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
11811     *
11812     * Note, end is the bytenr of last byte, so we need + 1 here.
11813     */
11814     - if (state->state & EXTENT_DELALLOC)
11815     + if (state_flags & EXTENT_DELALLOC)
11816     btrfs_qgroup_free_data(inode, NULL, start, end - start + 1);
11817    
11818     clear_extent_bit(io_tree, start, end,
11819     diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
11820     index d60b6caf09e8..bd4767f562cd 100644
11821     --- a/fs/btrfs/ioctl.c
11822     +++ b/fs/btrfs/ioctl.c
11823     @@ -491,7 +491,6 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
11824     struct fstrim_range range;
11825     u64 minlen = ULLONG_MAX;
11826     u64 num_devices = 0;
11827     - u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
11828     int ret;
11829    
11830     if (!capable(CAP_SYS_ADMIN))
11831     @@ -515,11 +514,15 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
11832     return -EOPNOTSUPP;
11833     if (copy_from_user(&range, arg, sizeof(range)))
11834     return -EFAULT;
11835     - if (range.start > total_bytes ||
11836     - range.len < fs_info->sb->s_blocksize)
11837     +
11838     + /*
11839     + * NOTE: Don't truncate the range using super->total_bytes. Bytenr of
11840     + * block group is in the logical address space, which can be any
11841     + * sectorsize aligned bytenr in the range [0, U64_MAX].
11842     + */
11843     + if (range.len < fs_info->sb->s_blocksize)
11844     return -EINVAL;
11845    
11846     - range.len = min(range.len, total_bytes - range.start);
11847     range.minlen = max(range.minlen, minlen);
11848     ret = btrfs_trim_fs(fs_info, &range);
11849     if (ret < 0)
11850     diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
11851     index d4917c0cddf5..b070401406be 100644
11852     --- a/fs/btrfs/qgroup.c
11853     +++ b/fs/btrfs/qgroup.c
11854     @@ -2897,6 +2897,7 @@ qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
11855     qgroup->rfer_cmpr = 0;
11856     qgroup->excl = 0;
11857     qgroup->excl_cmpr = 0;
11858     + qgroup_dirty(fs_info, qgroup);
11859     }
11860     spin_unlock(&fs_info->qgroup_lock);
11861     }
11862     @@ -3106,6 +3107,10 @@ static int __btrfs_qgroup_release_data(struct inode *inode,
11863     int trace_op = QGROUP_RELEASE;
11864     int ret;
11865    
11866     + if (!test_bit(BTRFS_FS_QUOTA_ENABLED,
11867     + &BTRFS_I(inode)->root->fs_info->flags))
11868     + return 0;
11869     +
11870     /* In release case, we shouldn't have @reserved */
11871     WARN_ON(!free && reserved);
11872     if (free && reserved)
11873     diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
11874     index 54b8bb282c0e..4bbcc1e92a93 100644
11875     --- a/fs/btrfs/qgroup.h
11876     +++ b/fs/btrfs/qgroup.h
11877     @@ -249,6 +249,8 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
11878     static inline void btrfs_qgroup_free_delayed_ref(struct btrfs_fs_info *fs_info,
11879     u64 ref_root, u64 num_bytes)
11880     {
11881     + if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
11882     + return;
11883     trace_btrfs_qgroup_free_delayed_ref(fs_info, ref_root, num_bytes);
11884     btrfs_qgroup_free_refroot(fs_info, ref_root, num_bytes,
11885     BTRFS_QGROUP_RSV_DATA);
11886     diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
11887     index 8783a1776540..60bf8dfe7df4 100644
11888     --- a/fs/btrfs/relocation.c
11889     +++ b/fs/btrfs/relocation.c
11890     @@ -1281,7 +1281,7 @@ static void __del_reloc_root(struct btrfs_root *root)
11891     struct mapping_node *node = NULL;
11892     struct reloc_control *rc = fs_info->reloc_ctl;
11893    
11894     - if (rc) {
11895     + if (rc && root->node) {
11896     spin_lock(&rc->reloc_root_tree.lock);
11897     rb_node = tree_search(&rc->reloc_root_tree.rb_root,
11898     root->node->start);
11899     diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
11900     index 3b84f5015029..bb8f6c020d22 100644
11901     --- a/fs/btrfs/transaction.c
11902     +++ b/fs/btrfs/transaction.c
11903     @@ -1929,6 +1929,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
11904     return ret;
11905     }
11906    
11907     + btrfs_trans_release_metadata(trans);
11908     + trans->block_rsv = NULL;
11909     +
11910     /* make a pass through all the delayed refs we have so far
11911     * any runnings procs may add more while we are here
11912     */
11913     @@ -1938,9 +1941,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
11914     return ret;
11915     }
11916    
11917     - btrfs_trans_release_metadata(trans);
11918     - trans->block_rsv = NULL;
11919     -
11920     cur_trans = trans->transaction;
11921    
11922     /*
11923     @@ -2280,15 +2280,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
11924    
11925     kmem_cache_free(btrfs_trans_handle_cachep, trans);
11926    
11927     - /*
11928     - * If fs has been frozen, we can not handle delayed iputs, otherwise
11929     - * it'll result in deadlock about SB_FREEZE_FS.
11930     - */
11931     - if (current != fs_info->transaction_kthread &&
11932     - current != fs_info->cleaner_kthread &&
11933     - !test_bit(BTRFS_FS_FROZEN, &fs_info->flags))
11934     - btrfs_run_delayed_iputs(fs_info);
11935     -
11936     return ret;
11937    
11938     scrub_continue:
11939     diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
11940     index 3c2ae0e4f25a..d0bcfbfc0e3a 100644
11941     --- a/fs/btrfs/tree-log.c
11942     +++ b/fs/btrfs/tree-log.c
11943     @@ -258,6 +258,13 @@ struct walk_control {
11944     /* what stage of the replay code we're currently in */
11945     int stage;
11946    
11947     + /*
11948     + * Ignore any items from the inode currently being processed. Needs
11949     + * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
11950     + * the LOG_WALK_REPLAY_INODES stage.
11951     + */
11952     + bool ignore_cur_inode;
11953     +
11954     /* the root we are currently replaying */
11955     struct btrfs_root *replay_dest;
11956    
11957     @@ -2487,6 +2494,20 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
11958    
11959     inode_item = btrfs_item_ptr(eb, i,
11960     struct btrfs_inode_item);
11961     + /*
11962     + * If we have a tmpfile (O_TMPFILE) that got fsync'ed
11963     + * and never got linked before the fsync, skip it, as
11964     + * replaying it is pointless since it would be deleted
11965     + * later. We skip logging tmpfiles, but it's always
11966     + * possible we are replaying a log created with a kernel
11967     + * that used to log tmpfiles.
11968     + */
11969     + if (btrfs_inode_nlink(eb, inode_item) == 0) {
11970     + wc->ignore_cur_inode = true;
11971     + continue;
11972     + } else {
11973     + wc->ignore_cur_inode = false;
11974     + }
11975     ret = replay_xattr_deletes(wc->trans, root, log,
11976     path, key.objectid);
11977     if (ret)
11978     @@ -2524,16 +2545,8 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
11979     root->fs_info->sectorsize);
11980     ret = btrfs_drop_extents(wc->trans, root, inode,
11981     from, (u64)-1, 1);
11982     - /*
11983     - * If the nlink count is zero here, the iput
11984     - * will free the inode. We bump it to make
11985     - * sure it doesn't get freed until the link
11986     - * count fixup is done.
11987     - */
11988     if (!ret) {
11989     - if (inode->i_nlink == 0)
11990     - inc_nlink(inode);
11991     - /* Update link count and nbytes. */
11992     + /* Update the inode's nbytes. */
11993     ret = btrfs_update_inode(wc->trans,
11994     root, inode);
11995     }
11996     @@ -2548,6 +2561,9 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
11997     break;
11998     }
11999    
12000     + if (wc->ignore_cur_inode)
12001     + continue;
12002     +
12003     if (key.type == BTRFS_DIR_INDEX_KEY &&
12004     wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
12005     ret = replay_one_dir_item(wc->trans, root, path,
12006     @@ -3196,9 +3212,12 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
12007     };
12008    
12009     ret = walk_log_tree(trans, log, &wc);
12010     - /* I don't think this can happen but just in case */
12011     - if (ret)
12012     - btrfs_abort_transaction(trans, ret);
12013     + if (ret) {
12014     + if (trans)
12015     + btrfs_abort_transaction(trans, ret);
12016     + else
12017     + btrfs_handle_fs_error(log->fs_info, ret, NULL);
12018     + }
12019    
12020     while (1) {
12021     ret = find_first_extent_bit(&log->dirty_log_pages,
12022     @@ -4374,7 +4393,6 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
12023    
12024     INIT_LIST_HEAD(&extents);
12025    
12026     - down_write(&inode->dio_sem);
12027     write_lock(&tree->lock);
12028     test_gen = root->fs_info->last_trans_committed;
12029     logged_start = start;
12030     @@ -4440,7 +4458,6 @@ process:
12031     }
12032     WARN_ON(!list_empty(&extents));
12033     write_unlock(&tree->lock);
12034     - up_write(&inode->dio_sem);
12035    
12036     btrfs_release_path(path);
12037     if (!ret)
12038     @@ -4636,7 +4653,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
12039     ASSERT(len == i_size ||
12040     (len == fs_info->sectorsize &&
12041     btrfs_file_extent_compression(leaf, extent) !=
12042     - BTRFS_COMPRESS_NONE));
12043     + BTRFS_COMPRESS_NONE) ||
12044     + (len < i_size && i_size < fs_info->sectorsize));
12045     return 0;
12046     }
12047    
12048     @@ -5564,9 +5582,33 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
12049    
12050     dir_inode = btrfs_iget(fs_info->sb, &inode_key,
12051     root, NULL);
12052     - /* If parent inode was deleted, skip it. */
12053     - if (IS_ERR(dir_inode))
12054     - continue;
12055     + /*
12056     + * If the parent inode was deleted, return an error to
12057     + * fallback to a transaction commit. This is to prevent
12058     + * getting an inode that was moved from one parent A to
12059     + * a parent B, got its former parent A deleted and then
12060     + * it got fsync'ed, from existing at both parents after
12061     + * a log replay (and the old parent still existing).
12062     + * Example:
12063     + *
12064     + * mkdir /mnt/A
12065     + * mkdir /mnt/B
12066     + * touch /mnt/B/bar
12067     + * sync
12068     + * mv /mnt/B/bar /mnt/A/bar
12069     + * mv -T /mnt/A /mnt/B
12070     + * fsync /mnt/B/bar
12071     + * <power fail>
12072     + *
12073     + * If we ignore the old parent B which got deleted,
12074     + * after a log replay we would have file bar linked
12075     + * at both parents and the old parent B would still
12076     + * exist.
12077     + */
12078     + if (IS_ERR(dir_inode)) {
12079     + ret = PTR_ERR(dir_inode);
12080     + goto out;
12081     + }
12082    
12083     if (ctx)
12084     ctx->log_new_dentries = false;
12085     @@ -5640,7 +5682,13 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
12086     if (ret)
12087     goto end_no_trans;
12088    
12089     - if (btrfs_inode_in_log(inode, trans->transid)) {
12090     + /*
12091     + * Skip already logged inodes or inodes corresponding to tmpfiles
12092     + * (since logging them is pointless, a link count of 0 means they
12093     + * will never be accessible).
12094     + */
12095     + if (btrfs_inode_in_log(inode, trans->transid) ||
12096     + inode->vfs_inode.i_nlink == 0) {
12097     ret = BTRFS_NO_LOG_SYNC;
12098     goto end_no_trans;
12099     }
12100     diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
12101     index f1fbea947fef..06576797cf31 100644
12102     --- a/fs/cifs/cifs_debug.c
12103     +++ b/fs/cifs/cifs_debug.c
12104     @@ -383,6 +383,9 @@ static ssize_t cifs_stats_proc_write(struct file *file,
12105     atomic_set(&totBufAllocCount, 0);
12106     atomic_set(&totSmBufAllocCount, 0);
12107     #endif /* CONFIG_CIFS_STATS2 */
12108     + atomic_set(&tcpSesReconnectCount, 0);
12109     + atomic_set(&tconInfoReconnectCount, 0);
12110     +
12111     spin_lock(&GlobalMid_Lock);
12112     GlobalMaxActiveXid = 0;
12113     GlobalCurrentXid = 0;
12114     diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
12115     index b611fc2e8984..7f01c6e60791 100644
12116     --- a/fs/cifs/cifs_spnego.c
12117     +++ b/fs/cifs/cifs_spnego.c
12118     @@ -147,8 +147,10 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo)
12119     sprintf(dp, ";sec=krb5");
12120     else if (server->sec_mskerberos)
12121     sprintf(dp, ";sec=mskrb5");
12122     - else
12123     - goto out;
12124     + else {
12125     + cifs_dbg(VFS, "unknown or missing server auth type, use krb5\n");
12126     + sprintf(dp, ";sec=krb5");
12127     + }
12128    
12129     dp = description + strlen(description);
12130     sprintf(dp, ";uid=0x%x",
12131     diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
12132     index 6e8765f44508..020f49c15b30 100644
12133     --- a/fs/cifs/inode.c
12134     +++ b/fs/cifs/inode.c
12135     @@ -777,7 +777,15 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
12136     } else if (rc == -EREMOTE) {
12137     cifs_create_dfs_fattr(&fattr, sb);
12138     rc = 0;
12139     - } else if (rc == -EACCES && backup_cred(cifs_sb)) {
12140     + } else if ((rc == -EACCES) && backup_cred(cifs_sb) &&
12141     + (strcmp(server->vals->version_string, SMB1_VERSION_STRING)
12142     + == 0)) {
12143     + /*
12144     + * For SMB2 and later the backup intent flag is already
12145     + * sent if needed on open and there is no path based
12146     + * FindFirst operation to use to retry with
12147     + */
12148     +
12149     srchinf = kzalloc(sizeof(struct cifs_search_info),
12150     GFP_KERNEL);
12151     if (srchinf == NULL) {
12152     diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
12153     index b48f43963da6..333729cf46cd 100644
12154     --- a/fs/cifs/transport.c
12155     +++ b/fs/cifs/transport.c
12156     @@ -786,7 +786,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
12157     int i, j, rc = 0;
12158     int timeout, optype;
12159     struct mid_q_entry *midQ[MAX_COMPOUND];
12160     - unsigned int credits = 1;
12161     + unsigned int credits = 0;
12162     char *buf;
12163    
12164     timeout = flags & CIFS_TIMEOUT_MASK;
12165     @@ -851,17 +851,20 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
12166    
12167     mutex_unlock(&ses->server->srv_mutex);
12168    
12169     - for (i = 0; i < num_rqst; i++) {
12170     - if (rc < 0)
12171     - goto out;
12172     + if (rc < 0)
12173     + goto out;
12174    
12175     - if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
12176     - smb311_update_preauth_hash(ses, rqst[i].rq_iov,
12177     - rqst[i].rq_nvec);
12178     + /*
12179     + * Compounding is never used during session establish.
12180     + */
12181     + if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
12182     + smb311_update_preauth_hash(ses, rqst[0].rq_iov,
12183     + rqst[0].rq_nvec);
12184    
12185     - if (timeout == CIFS_ASYNC_OP)
12186     - goto out;
12187     + if (timeout == CIFS_ASYNC_OP)
12188     + goto out;
12189    
12190     + for (i = 0; i < num_rqst; i++) {
12191     rc = wait_for_response(ses->server, midQ[i]);
12192     if (rc != 0) {
12193     cifs_dbg(FYI, "Cancelling wait for mid %llu\n",
12194     @@ -877,10 +880,21 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
12195     }
12196     spin_unlock(&GlobalMid_Lock);
12197     }
12198     + }
12199     +
12200     + for (i = 0; i < num_rqst; i++)
12201     + if (midQ[i]->resp_buf)
12202     + credits += ses->server->ops->get_credits(midQ[i]);
12203     + if (!credits)
12204     + credits = 1;
12205     +
12206     + for (i = 0; i < num_rqst; i++) {
12207     + if (rc < 0)
12208     + goto out;
12209    
12210     rc = cifs_sync_mid_result(midQ[i], ses->server);
12211     if (rc != 0) {
12212     - add_credits(ses->server, 1, optype);
12213     + add_credits(ses->server, credits, optype);
12214     return rc;
12215     }
12216    
12217     @@ -901,23 +915,26 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
12218     else
12219     resp_buf_type[i] = CIFS_SMALL_BUFFER;
12220    
12221     - if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
12222     - struct kvec iov = {
12223     - .iov_base = resp_iov[i].iov_base,
12224     - .iov_len = resp_iov[i].iov_len
12225     - };
12226     - smb311_update_preauth_hash(ses, &iov, 1);
12227     - }
12228     -
12229     - credits = ses->server->ops->get_credits(midQ[i]);
12230     -
12231     rc = ses->server->ops->check_receive(midQ[i], ses->server,
12232     flags & CIFS_LOG_ERROR);
12233    
12234     /* mark it so buf will not be freed by cifs_delete_mid */
12235     if ((flags & CIFS_NO_RESP) == 0)
12236     midQ[i]->resp_buf = NULL;
12237     +
12238     }
12239     +
12240     + /*
12241     + * Compounding is never used during session establish.
12242     + */
12243     + if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
12244     + struct kvec iov = {
12245     + .iov_base = resp_iov[0].iov_base,
12246     + .iov_len = resp_iov[0].iov_len
12247     + };
12248     + smb311_update_preauth_hash(ses, &iov, 1);
12249     + }
12250     +
12251     out:
12252     /*
12253     * This will dequeue all mids. After this it is important that the
12254     diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
12255     index f408994fc632..6e000392e4a4 100644
12256     --- a/fs/cramfs/inode.c
12257     +++ b/fs/cramfs/inode.c
12258     @@ -202,7 +202,8 @@ static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
12259     continue;
12260     blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT;
12261     blk_offset += offset;
12262     - if (blk_offset + len > BUFFER_SIZE)
12263     + if (blk_offset > BUFFER_SIZE ||
12264     + blk_offset + len > BUFFER_SIZE)
12265     continue;
12266     return read_buffers[i] + blk_offset;
12267     }
12268     diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
12269     index 39c20ef26db4..79debfc9cef9 100644
12270     --- a/fs/crypto/fscrypt_private.h
12271     +++ b/fs/crypto/fscrypt_private.h
12272     @@ -83,10 +83,6 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
12273     filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
12274     return true;
12275    
12276     - if (contents_mode == FS_ENCRYPTION_MODE_SPECK128_256_XTS &&
12277     - filenames_mode == FS_ENCRYPTION_MODE_SPECK128_256_CTS)
12278     - return true;
12279     -
12280     return false;
12281     }
12282    
12283     diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
12284     index e997ca51192f..7874c9bb2fc5 100644
12285     --- a/fs/crypto/keyinfo.c
12286     +++ b/fs/crypto/keyinfo.c
12287     @@ -174,16 +174,6 @@ static struct fscrypt_mode {
12288     .cipher_str = "cts(cbc(aes))",
12289     .keysize = 16,
12290     },
12291     - [FS_ENCRYPTION_MODE_SPECK128_256_XTS] = {
12292     - .friendly_name = "Speck128/256-XTS",
12293     - .cipher_str = "xts(speck128)",
12294     - .keysize = 64,
12295     - },
12296     - [FS_ENCRYPTION_MODE_SPECK128_256_CTS] = {
12297     - .friendly_name = "Speck128/256-CTS-CBC",
12298     - .cipher_str = "cts(cbc(speck128))",
12299     - .keysize = 32,
12300     - },
12301     };
12302    
12303     static struct fscrypt_mode *
12304     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
12305     index caff935fbeb8..5cfb1e2f6a5b 100644
12306     --- a/fs/ext4/ext4.h
12307     +++ b/fs/ext4/ext4.h
12308     @@ -1401,7 +1401,8 @@ struct ext4_sb_info {
12309     u32 s_min_batch_time;
12310     struct block_device *journal_bdev;
12311     #ifdef CONFIG_QUOTA
12312     - char *s_qf_names[EXT4_MAXQUOTAS]; /* Names of quota files with journalled quota */
12313     + /* Names of quota files with journalled quota */
12314     + char __rcu *s_qf_names[EXT4_MAXQUOTAS];
12315     int s_jquota_fmt; /* Format of quota to use */
12316     #endif
12317     unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
12318     diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
12319     index 7b4736022761..9c4bac18cc6c 100644
12320     --- a/fs/ext4/inline.c
12321     +++ b/fs/ext4/inline.c
12322     @@ -863,7 +863,7 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping,
12323     handle_t *handle;
12324     struct page *page;
12325     struct ext4_iloc iloc;
12326     - int retries;
12327     + int retries = 0;
12328    
12329     ret = ext4_get_inode_loc(inode, &iloc);
12330     if (ret)
12331     diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
12332     index a7074115d6f6..0edee31913d1 100644
12333     --- a/fs/ext4/ioctl.c
12334     +++ b/fs/ext4/ioctl.c
12335     @@ -67,7 +67,6 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
12336     ei1 = EXT4_I(inode1);
12337     ei2 = EXT4_I(inode2);
12338    
12339     - swap(inode1->i_flags, inode2->i_flags);
12340     swap(inode1->i_version, inode2->i_version);
12341     swap(inode1->i_blocks, inode2->i_blocks);
12342     swap(inode1->i_bytes, inode2->i_bytes);
12343     @@ -85,6 +84,21 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
12344     i_size_write(inode2, isize);
12345     }
12346    
12347     +static void reset_inode_seed(struct inode *inode)
12348     +{
12349     + struct ext4_inode_info *ei = EXT4_I(inode);
12350     + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
12351     + __le32 inum = cpu_to_le32(inode->i_ino);
12352     + __le32 gen = cpu_to_le32(inode->i_generation);
12353     + __u32 csum;
12354     +
12355     + if (!ext4_has_metadata_csum(inode->i_sb))
12356     + return;
12357     +
12358     + csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, sizeof(inum));
12359     + ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, sizeof(gen));
12360     +}
12361     +
12362     /**
12363     * Swap the information from the given @inode and the inode
12364     * EXT4_BOOT_LOADER_INO. It will basically swap i_data and all other
12365     @@ -102,10 +116,13 @@ static long swap_inode_boot_loader(struct super_block *sb,
12366     struct inode *inode_bl;
12367     struct ext4_inode_info *ei_bl;
12368    
12369     - if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode))
12370     + if (inode->i_nlink != 1 || !S_ISREG(inode->i_mode) ||
12371     + IS_SWAPFILE(inode) || IS_ENCRYPTED(inode) ||
12372     + ext4_has_inline_data(inode))
12373     return -EINVAL;
12374    
12375     - if (!inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
12376     + if (IS_RDONLY(inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) ||
12377     + !inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
12378     return -EPERM;
12379    
12380     inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO);
12381     @@ -120,13 +137,13 @@ static long swap_inode_boot_loader(struct super_block *sb,
12382     * that only 1 swap_inode_boot_loader is running. */
12383     lock_two_nondirectories(inode, inode_bl);
12384    
12385     - truncate_inode_pages(&inode->i_data, 0);
12386     - truncate_inode_pages(&inode_bl->i_data, 0);
12387     -
12388     /* Wait for all existing dio workers */
12389     inode_dio_wait(inode);
12390     inode_dio_wait(inode_bl);
12391    
12392     + truncate_inode_pages(&inode->i_data, 0);
12393     + truncate_inode_pages(&inode_bl->i_data, 0);
12394     +
12395     handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
12396     if (IS_ERR(handle)) {
12397     err = -EINVAL;
12398     @@ -159,6 +176,8 @@ static long swap_inode_boot_loader(struct super_block *sb,
12399    
12400     inode->i_generation = prandom_u32();
12401     inode_bl->i_generation = prandom_u32();
12402     + reset_inode_seed(inode);
12403     + reset_inode_seed(inode_bl);
12404    
12405     ext4_discard_preallocations(inode);
12406    
12407     @@ -169,6 +188,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
12408     inode->i_ino, err);
12409     /* Revert all changes: */
12410     swap_inode_data(inode, inode_bl);
12411     + ext4_mark_inode_dirty(handle, inode);
12412     } else {
12413     err = ext4_mark_inode_dirty(handle, inode_bl);
12414     if (err < 0) {
12415     @@ -178,6 +198,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
12416     /* Revert all changes: */
12417     swap_inode_data(inode, inode_bl);
12418     ext4_mark_inode_dirty(handle, inode);
12419     + ext4_mark_inode_dirty(handle, inode_bl);
12420     }
12421     }
12422     ext4_journal_stop(handle);
12423     @@ -339,19 +360,14 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
12424     if (projid_eq(kprojid, EXT4_I(inode)->i_projid))
12425     return 0;
12426    
12427     - err = mnt_want_write_file(filp);
12428     - if (err)
12429     - return err;
12430     -
12431     err = -EPERM;
12432     - inode_lock(inode);
12433     /* Is it quota file? Do not allow user to mess with it */
12434     if (ext4_is_quota_file(inode))
12435     - goto out_unlock;
12436     + return err;
12437    
12438     err = ext4_get_inode_loc(inode, &iloc);
12439     if (err)
12440     - goto out_unlock;
12441     + return err;
12442    
12443     raw_inode = ext4_raw_inode(&iloc);
12444     if (!EXT4_FITS_IN_INODE(raw_inode, ei, i_projid)) {
12445     @@ -359,20 +375,20 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
12446     EXT4_SB(sb)->s_want_extra_isize,
12447     &iloc);
12448     if (err)
12449     - goto out_unlock;
12450     + return err;
12451     } else {
12452     brelse(iloc.bh);
12453     }
12454    
12455     - dquot_initialize(inode);
12456     + err = dquot_initialize(inode);
12457     + if (err)
12458     + return err;
12459    
12460     handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
12461     EXT4_QUOTA_INIT_BLOCKS(sb) +
12462     EXT4_QUOTA_DEL_BLOCKS(sb) + 3);
12463     - if (IS_ERR(handle)) {
12464     - err = PTR_ERR(handle);
12465     - goto out_unlock;
12466     - }
12467     + if (IS_ERR(handle))
12468     + return PTR_ERR(handle);
12469    
12470     err = ext4_reserve_inode_write(handle, inode, &iloc);
12471     if (err)
12472     @@ -400,9 +416,6 @@ out_dirty:
12473     err = rc;
12474     out_stop:
12475     ext4_journal_stop(handle);
12476     -out_unlock:
12477     - inode_unlock(inode);
12478     - mnt_drop_write_file(filp);
12479     return err;
12480     }
12481     #else
12482     @@ -626,6 +639,30 @@ group_add_out:
12483     return err;
12484     }
12485    
12486     +static int ext4_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
12487     +{
12488     + /*
12489     + * Project Quota ID state is only allowed to change from within the init
12490     + * namespace. Enforce that restriction only if we are trying to change
12491     + * the quota ID state. Everything else is allowed in user namespaces.
12492     + */
12493     + if (current_user_ns() == &init_user_ns)
12494     + return 0;
12495     +
12496     + if (__kprojid_val(EXT4_I(inode)->i_projid) != fa->fsx_projid)
12497     + return -EINVAL;
12498     +
12499     + if (ext4_test_inode_flag(inode, EXT4_INODE_PROJINHERIT)) {
12500     + if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
12501     + return -EINVAL;
12502     + } else {
12503     + if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
12504     + return -EINVAL;
12505     + }
12506     +
12507     + return 0;
12508     +}
12509     +
12510     long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
12511     {
12512     struct inode *inode = file_inode(filp);
12513     @@ -1025,19 +1062,19 @@ resizefs_out:
12514     return err;
12515    
12516     inode_lock(inode);
12517     + err = ext4_ioctl_check_project(inode, &fa);
12518     + if (err)
12519     + goto out;
12520     flags = (ei->i_flags & ~EXT4_FL_XFLAG_VISIBLE) |
12521     (flags & EXT4_FL_XFLAG_VISIBLE);
12522     err = ext4_ioctl_setflags(inode, flags);
12523     - inode_unlock(inode);
12524     - mnt_drop_write_file(filp);
12525     if (err)
12526     - return err;
12527     -
12528     + goto out;
12529     err = ext4_ioctl_setproject(filp, fa.fsx_projid);
12530     - if (err)
12531     - return err;
12532     -
12533     - return 0;
12534     +out:
12535     + inode_unlock(inode);
12536     + mnt_drop_write_file(filp);
12537     + return err;
12538     }
12539     case EXT4_IOC_SHUTDOWN:
12540     return ext4_shutdown(sb, arg);
12541     diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
12542     index a409ff70d67b..2f5be02fc6f6 100644
12543     --- a/fs/ext4/move_extent.c
12544     +++ b/fs/ext4/move_extent.c
12545     @@ -516,9 +516,13 @@ mext_check_arguments(struct inode *orig_inode,
12546     orig_inode->i_ino, donor_inode->i_ino);
12547     return -EINVAL;
12548     }
12549     - if (orig_eof < orig_start + *len - 1)
12550     + if (orig_eof <= orig_start)
12551     + *len = 0;
12552     + else if (orig_eof < orig_start + *len - 1)
12553     *len = orig_eof - orig_start;
12554     - if (donor_eof < donor_start + *len - 1)
12555     + if (donor_eof <= donor_start)
12556     + *len = 0;
12557     + else if (donor_eof < donor_start + *len - 1)
12558     *len = donor_eof - donor_start;
12559     if (!*len) {
12560     ext4_debug("ext4 move extent: len should not be 0 "
12561     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
12562     index 1145109968ef..d3d4643ab79b 100644
12563     --- a/fs/ext4/super.c
12564     +++ b/fs/ext4/super.c
12565     @@ -914,6 +914,18 @@ static inline void ext4_quota_off_umount(struct super_block *sb)
12566     for (type = 0; type < EXT4_MAXQUOTAS; type++)
12567     ext4_quota_off(sb, type);
12568     }
12569     +
12570     +/*
12571     + * This is a helper function which is used in the mount/remount
12572     + * codepaths (which holds s_umount) to fetch the quota file name.
12573     + */
12574     +static inline char *get_qf_name(struct super_block *sb,
12575     + struct ext4_sb_info *sbi,
12576     + int type)
12577     +{
12578     + return rcu_dereference_protected(sbi->s_qf_names[type],
12579     + lockdep_is_held(&sb->s_umount));
12580     +}
12581     #else
12582     static inline void ext4_quota_off_umount(struct super_block *sb)
12583     {
12584     @@ -965,7 +977,7 @@ static void ext4_put_super(struct super_block *sb)
12585     percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
12586     #ifdef CONFIG_QUOTA
12587     for (i = 0; i < EXT4_MAXQUOTAS; i++)
12588     - kfree(sbi->s_qf_names[i]);
12589     + kfree(get_qf_name(sb, sbi, i));
12590     #endif
12591    
12592     /* Debugging code just in case the in-memory inode orphan list
12593     @@ -1530,11 +1542,10 @@ static const char deprecated_msg[] =
12594     static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
12595     {
12596     struct ext4_sb_info *sbi = EXT4_SB(sb);
12597     - char *qname;
12598     + char *qname, *old_qname = get_qf_name(sb, sbi, qtype);
12599     int ret = -1;
12600    
12601     - if (sb_any_quota_loaded(sb) &&
12602     - !sbi->s_qf_names[qtype]) {
12603     + if (sb_any_quota_loaded(sb) && !old_qname) {
12604     ext4_msg(sb, KERN_ERR,
12605     "Cannot change journaled "
12606     "quota options when quota turned on");
12607     @@ -1551,8 +1562,8 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
12608     "Not enough memory for storing quotafile name");
12609     return -1;
12610     }
12611     - if (sbi->s_qf_names[qtype]) {
12612     - if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
12613     + if (old_qname) {
12614     + if (strcmp(old_qname, qname) == 0)
12615     ret = 1;
12616     else
12617     ext4_msg(sb, KERN_ERR,
12618     @@ -1565,7 +1576,7 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
12619     "quotafile must be on filesystem root");
12620     goto errout;
12621     }
12622     - sbi->s_qf_names[qtype] = qname;
12623     + rcu_assign_pointer(sbi->s_qf_names[qtype], qname);
12624     set_opt(sb, QUOTA);
12625     return 1;
12626     errout:
12627     @@ -1577,15 +1588,16 @@ static int clear_qf_name(struct super_block *sb, int qtype)
12628     {
12629    
12630     struct ext4_sb_info *sbi = EXT4_SB(sb);
12631     + char *old_qname = get_qf_name(sb, sbi, qtype);
12632    
12633     - if (sb_any_quota_loaded(sb) &&
12634     - sbi->s_qf_names[qtype]) {
12635     + if (sb_any_quota_loaded(sb) && old_qname) {
12636     ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
12637     " when quota turned on");
12638     return -1;
12639     }
12640     - kfree(sbi->s_qf_names[qtype]);
12641     - sbi->s_qf_names[qtype] = NULL;
12642     + rcu_assign_pointer(sbi->s_qf_names[qtype], NULL);
12643     + synchronize_rcu();
12644     + kfree(old_qname);
12645     return 1;
12646     }
12647     #endif
12648     @@ -1960,7 +1972,7 @@ static int parse_options(char *options, struct super_block *sb,
12649     int is_remount)
12650     {
12651     struct ext4_sb_info *sbi = EXT4_SB(sb);
12652     - char *p;
12653     + char *p, __maybe_unused *usr_qf_name, __maybe_unused *grp_qf_name;
12654     substring_t args[MAX_OPT_ARGS];
12655     int token;
12656    
12657     @@ -1991,11 +2003,13 @@ static int parse_options(char *options, struct super_block *sb,
12658     "Cannot enable project quota enforcement.");
12659     return 0;
12660     }
12661     - if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
12662     - if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
12663     + usr_qf_name = get_qf_name(sb, sbi, USRQUOTA);
12664     + grp_qf_name = get_qf_name(sb, sbi, GRPQUOTA);
12665     + if (usr_qf_name || grp_qf_name) {
12666     + if (test_opt(sb, USRQUOTA) && usr_qf_name)
12667     clear_opt(sb, USRQUOTA);
12668    
12669     - if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
12670     + if (test_opt(sb, GRPQUOTA) && grp_qf_name)
12671     clear_opt(sb, GRPQUOTA);
12672    
12673     if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
12674     @@ -2029,6 +2043,7 @@ static inline void ext4_show_quota_options(struct seq_file *seq,
12675     {
12676     #if defined(CONFIG_QUOTA)
12677     struct ext4_sb_info *sbi = EXT4_SB(sb);
12678     + char *usr_qf_name, *grp_qf_name;
12679    
12680     if (sbi->s_jquota_fmt) {
12681     char *fmtname = "";
12682     @@ -2047,11 +2062,14 @@ static inline void ext4_show_quota_options(struct seq_file *seq,
12683     seq_printf(seq, ",jqfmt=%s", fmtname);
12684     }
12685    
12686     - if (sbi->s_qf_names[USRQUOTA])
12687     - seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);
12688     -
12689     - if (sbi->s_qf_names[GRPQUOTA])
12690     - seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
12691     + rcu_read_lock();
12692     + usr_qf_name = rcu_dereference(sbi->s_qf_names[USRQUOTA]);
12693     + grp_qf_name = rcu_dereference(sbi->s_qf_names[GRPQUOTA]);
12694     + if (usr_qf_name)
12695     + seq_show_option(seq, "usrjquota", usr_qf_name);
12696     + if (grp_qf_name)
12697     + seq_show_option(seq, "grpjquota", grp_qf_name);
12698     + rcu_read_unlock();
12699     #endif
12700     }
12701    
12702     @@ -5103,6 +5121,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
12703     int err = 0;
12704     #ifdef CONFIG_QUOTA
12705     int i, j;
12706     + char *to_free[EXT4_MAXQUOTAS];
12707     #endif
12708     char *orig_data = kstrdup(data, GFP_KERNEL);
12709    
12710     @@ -5122,8 +5141,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
12711     old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
12712     for (i = 0; i < EXT4_MAXQUOTAS; i++)
12713     if (sbi->s_qf_names[i]) {
12714     - old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
12715     - GFP_KERNEL);
12716     + char *qf_name = get_qf_name(sb, sbi, i);
12717     +
12718     + old_opts.s_qf_names[i] = kstrdup(qf_name, GFP_KERNEL);
12719     if (!old_opts.s_qf_names[i]) {
12720     for (j = 0; j < i; j++)
12721     kfree(old_opts.s_qf_names[j]);
12722     @@ -5352,9 +5372,12 @@ restore_opts:
12723     #ifdef CONFIG_QUOTA
12724     sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
12725     for (i = 0; i < EXT4_MAXQUOTAS; i++) {
12726     - kfree(sbi->s_qf_names[i]);
12727     - sbi->s_qf_names[i] = old_opts.s_qf_names[i];
12728     + to_free[i] = get_qf_name(sb, sbi, i);
12729     + rcu_assign_pointer(sbi->s_qf_names[i], old_opts.s_qf_names[i]);
12730     }
12731     + synchronize_rcu();
12732     + for (i = 0; i < EXT4_MAXQUOTAS; i++)
12733     + kfree(to_free[i]);
12734     #endif
12735     kfree(orig_data);
12736     return err;
12737     @@ -5545,7 +5568,7 @@ static int ext4_write_info(struct super_block *sb, int type)
12738     */
12739     static int ext4_quota_on_mount(struct super_block *sb, int type)
12740     {
12741     - return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
12742     + return dquot_quota_on_mount(sb, get_qf_name(sb, EXT4_SB(sb), type),
12743     EXT4_SB(sb)->s_jquota_fmt, type);
12744     }
12745    
12746     diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
12747     index e8b6b89bddb8..59d0472013f4 100644
12748     --- a/fs/f2fs/checkpoint.c
12749     +++ b/fs/f2fs/checkpoint.c
12750     @@ -696,6 +696,8 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
12751     /* clear Orphan Flag */
12752     clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
12753     out:
12754     + set_sbi_flag(sbi, SBI_IS_RECOVERED);
12755     +
12756     #ifdef CONFIG_QUOTA
12757     /* Turn quotas off */
12758     if (quota_enabled)
12759     diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
12760     index 382c1ef9a9e4..11f28342f641 100644
12761     --- a/fs/f2fs/data.c
12762     +++ b/fs/f2fs/data.c
12763     @@ -80,7 +80,8 @@ static void __read_end_io(struct bio *bio)
12764     /* PG_error was set if any post_read step failed */
12765     if (bio->bi_status || PageError(page)) {
12766     ClearPageUptodate(page);
12767     - SetPageError(page);
12768     + /* will re-read again later */
12769     + ClearPageError(page);
12770     } else {
12771     SetPageUptodate(page);
12772     }
12773     @@ -456,12 +457,16 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
12774     bio_put(bio);
12775     return -EFAULT;
12776     }
12777     - bio_set_op_attrs(bio, fio->op, fio->op_flags);
12778    
12779     - __submit_bio(fio->sbi, bio, fio->type);
12780     + if (fio->io_wbc && !is_read_io(fio->op))
12781     + wbc_account_io(fio->io_wbc, page, PAGE_SIZE);
12782     +
12783     + bio_set_op_attrs(bio, fio->op, fio->op_flags);
12784    
12785     if (!is_read_io(fio->op))
12786     inc_page_count(fio->sbi, WB_DATA_TYPE(fio->page));
12787     +
12788     + __submit_bio(fio->sbi, bio, fio->type);
12789     return 0;
12790     }
12791    
12792     @@ -586,6 +591,7 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
12793     bio_put(bio);
12794     return -EFAULT;
12795     }
12796     + ClearPageError(page);
12797     __submit_bio(F2FS_I_SB(inode), bio, DATA);
12798     return 0;
12799     }
12800     @@ -1561,6 +1567,7 @@ submit_and_realloc:
12801     if (bio_add_page(bio, page, blocksize, 0) < blocksize)
12802     goto submit_and_realloc;
12803    
12804     + ClearPageError(page);
12805     last_block_in_bio = block_nr;
12806     goto next_page;
12807     set_error_page:
12808     @@ -2583,10 +2590,6 @@ static int f2fs_set_data_page_dirty(struct page *page)
12809     if (!PageUptodate(page))
12810     SetPageUptodate(page);
12811    
12812     - /* don't remain PG_checked flag which was set during GC */
12813     - if (is_cold_data(page))
12814     - clear_cold_data(page);
12815     -
12816     if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
12817     if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
12818     f2fs_register_inmem_page(inode, page);
12819     diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
12820     index 231b77ef5a53..a70cd2580eae 100644
12821     --- a/fs/f2fs/extent_cache.c
12822     +++ b/fs/f2fs/extent_cache.c
12823     @@ -308,14 +308,13 @@ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
12824     return count - atomic_read(&et->node_cnt);
12825     }
12826    
12827     -static void __drop_largest_extent(struct inode *inode,
12828     +static void __drop_largest_extent(struct extent_tree *et,
12829     pgoff_t fofs, unsigned int len)
12830     {
12831     - struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;
12832     -
12833     - if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs) {
12834     - largest->len = 0;
12835     - f2fs_mark_inode_dirty_sync(inode, true);
12836     + if (fofs < et->largest.fofs + et->largest.len &&
12837     + fofs + len > et->largest.fofs) {
12838     + et->largest.len = 0;
12839     + et->largest_updated = true;
12840     }
12841     }
12842    
12843     @@ -416,12 +415,11 @@ out:
12844     return ret;
12845     }
12846    
12847     -static struct extent_node *__try_merge_extent_node(struct inode *inode,
12848     +static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
12849     struct extent_tree *et, struct extent_info *ei,
12850     struct extent_node *prev_ex,
12851     struct extent_node *next_ex)
12852     {
12853     - struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
12854     struct extent_node *en = NULL;
12855    
12856     if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
12857     @@ -443,7 +441,7 @@ static struct extent_node *__try_merge_extent_node(struct inode *inode,
12858     if (!en)
12859     return NULL;
12860    
12861     - __try_update_largest_extent(inode, et, en);
12862     + __try_update_largest_extent(et, en);
12863    
12864     spin_lock(&sbi->extent_lock);
12865     if (!list_empty(&en->list)) {
12866     @@ -454,12 +452,11 @@ static struct extent_node *__try_merge_extent_node(struct inode *inode,
12867     return en;
12868     }
12869    
12870     -static struct extent_node *__insert_extent_tree(struct inode *inode,
12871     +static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
12872     struct extent_tree *et, struct extent_info *ei,
12873     struct rb_node **insert_p,
12874     struct rb_node *insert_parent)
12875     {
12876     - struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
12877     struct rb_node **p;
12878     struct rb_node *parent = NULL;
12879     struct extent_node *en = NULL;
12880     @@ -476,7 +473,7 @@ do_insert:
12881     if (!en)
12882     return NULL;
12883    
12884     - __try_update_largest_extent(inode, et, en);
12885     + __try_update_largest_extent(et, en);
12886    
12887     /* update in global extent list */
12888     spin_lock(&sbi->extent_lock);
12889     @@ -497,6 +494,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
12890     struct rb_node **insert_p = NULL, *insert_parent = NULL;
12891     unsigned int end = fofs + len;
12892     unsigned int pos = (unsigned int)fofs;
12893     + bool updated = false;
12894    
12895     if (!et)
12896     return;
12897     @@ -517,7 +515,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
12898     * drop largest extent before lookup, in case it's already
12899     * been shrunk from extent tree
12900     */
12901     - __drop_largest_extent(inode, fofs, len);
12902     + __drop_largest_extent(et, fofs, len);
12903    
12904     /* 1. lookup first extent node in range [fofs, fofs + len - 1] */
12905     en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
12906     @@ -550,7 +548,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
12907     set_extent_info(&ei, end,
12908     end - dei.fofs + dei.blk,
12909     org_end - end);
12910     - en1 = __insert_extent_tree(inode, et, &ei,
12911     + en1 = __insert_extent_tree(sbi, et, &ei,
12912     NULL, NULL);
12913     next_en = en1;
12914     } else {
12915     @@ -570,7 +568,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
12916     }
12917    
12918     if (parts)
12919     - __try_update_largest_extent(inode, et, en);
12920     + __try_update_largest_extent(et, en);
12921     else
12922     __release_extent_node(sbi, et, en);
12923    
12924     @@ -590,15 +588,16 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
12925     if (blkaddr) {
12926    
12927     set_extent_info(&ei, fofs, blkaddr, len);
12928     - if (!__try_merge_extent_node(inode, et, &ei, prev_en, next_en))
12929     - __insert_extent_tree(inode, et, &ei,
12930     + if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
12931     + __insert_extent_tree(sbi, et, &ei,
12932     insert_p, insert_parent);
12933    
12934     /* give up extent_cache, if split and small updates happen */
12935     if (dei.len >= 1 &&
12936     prev.len < F2FS_MIN_EXTENT_LEN &&
12937     et->largest.len < F2FS_MIN_EXTENT_LEN) {
12938     - __drop_largest_extent(inode, 0, UINT_MAX);
12939     + et->largest.len = 0;
12940     + et->largest_updated = true;
12941     set_inode_flag(inode, FI_NO_EXTENT);
12942     }
12943     }
12944     @@ -606,7 +605,15 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
12945     if (is_inode_flag_set(inode, FI_NO_EXTENT))
12946     __free_extent_tree(sbi, et);
12947    
12948     + if (et->largest_updated) {
12949     + et->largest_updated = false;
12950     + updated = true;
12951     + }
12952     +
12953     write_unlock(&et->lock);
12954     +
12955     + if (updated)
12956     + f2fs_mark_inode_dirty_sync(inode, true);
12957     }
12958    
12959     unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
12960     @@ -705,6 +712,7 @@ void f2fs_drop_extent_tree(struct inode *inode)
12961     {
12962     struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
12963     struct extent_tree *et = F2FS_I(inode)->extent_tree;
12964     + bool updated = false;
12965    
12966     if (!f2fs_may_extent_tree(inode))
12967     return;
12968     @@ -713,8 +721,13 @@ void f2fs_drop_extent_tree(struct inode *inode)
12969    
12970     write_lock(&et->lock);
12971     __free_extent_tree(sbi, et);
12972     - __drop_largest_extent(inode, 0, UINT_MAX);
12973     + if (et->largest.len) {
12974     + et->largest.len = 0;
12975     + updated = true;
12976     + }
12977     write_unlock(&et->lock);
12978     + if (updated)
12979     + f2fs_mark_inode_dirty_sync(inode, true);
12980     }
12981    
12982     void f2fs_destroy_extent_tree(struct inode *inode)
12983     diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
12984     index abf925664d9c..ecb735142276 100644
12985     --- a/fs/f2fs/f2fs.h
12986     +++ b/fs/f2fs/f2fs.h
12987     @@ -572,6 +572,7 @@ struct extent_tree {
12988     struct list_head list; /* to be used by sbi->zombie_list */
12989     rwlock_t lock; /* protect extent info rb-tree */
12990     atomic_t node_cnt; /* # of extent node in rb-tree*/
12991     + bool largest_updated; /* largest extent updated */
12992     };
12993    
12994     /*
12995     @@ -754,12 +755,12 @@ static inline bool __is_front_mergeable(struct extent_info *cur,
12996     }
12997    
12998     extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
12999     -static inline void __try_update_largest_extent(struct inode *inode,
13000     - struct extent_tree *et, struct extent_node *en)
13001     +static inline void __try_update_largest_extent(struct extent_tree *et,
13002     + struct extent_node *en)
13003     {
13004     if (en->ei.len > et->largest.len) {
13005     et->largest = en->ei;
13006     - f2fs_mark_inode_dirty_sync(inode, true);
13007     + et->largest_updated = true;
13008     }
13009     }
13010    
13011     @@ -1088,6 +1089,7 @@ enum {
13012     SBI_NEED_SB_WRITE, /* need to recover superblock */
13013     SBI_NEED_CP, /* need to checkpoint */
13014     SBI_IS_SHUTDOWN, /* shutdown by ioctl */
13015     + SBI_IS_RECOVERED, /* recovered orphan/data */
13016     };
13017    
13018     enum {
13019     diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
13020     index 959df2249875..dd608b819a3c 100644
13021     --- a/fs/f2fs/inode.c
13022     +++ b/fs/f2fs/inode.c
13023     @@ -368,6 +368,12 @@ static int do_read_inode(struct inode *inode)
13024     if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
13025     __recover_inline_status(inode, node_page);
13026    
13027     + /* try to recover cold bit for non-dir inode */
13028     + if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
13029     + set_cold_node(node_page, false);
13030     + set_page_dirty(node_page);
13031     + }
13032     +
13033     /* get rdev by using inline_info */
13034     __get_inode_rdev(inode, ri);
13035    
13036     diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
13037     index dd2e45a661aa..42ea42acb487 100644
13038     --- a/fs/f2fs/node.c
13039     +++ b/fs/f2fs/node.c
13040     @@ -1542,8 +1542,10 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
13041     }
13042    
13043     if (__is_valid_data_blkaddr(ni.blk_addr) &&
13044     - !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC))
13045     + !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC)) {
13046     + up_read(&sbi->node_write);
13047     goto redirty_out;
13048     + }
13049    
13050     if (atomic && !test_opt(sbi, NOBARRIER))
13051     fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
13052     @@ -2537,7 +2539,7 @@ retry:
13053     if (!PageUptodate(ipage))
13054     SetPageUptodate(ipage);
13055     fill_node_footer(ipage, ino, ino, 0, true);
13056     - set_cold_node(page, false);
13057     + set_cold_node(ipage, false);
13058    
13059     src = F2FS_INODE(page);
13060     dst = F2FS_INODE(ipage);
13061     @@ -2560,6 +2562,13 @@ retry:
13062     F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
13063     i_projid))
13064     dst->i_projid = src->i_projid;
13065     +
13066     + if (f2fs_sb_has_inode_crtime(sbi->sb) &&
13067     + F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
13068     + i_crtime_nsec)) {
13069     + dst->i_crtime = src->i_crtime;
13070     + dst->i_crtime_nsec = src->i_crtime_nsec;
13071     + }
13072     }
13073    
13074     new_ni = old_ni;
13075     diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
13076     index 95511ed11a22..9a8579fb3a30 100644
13077     --- a/fs/f2fs/recovery.c
13078     +++ b/fs/f2fs/recovery.c
13079     @@ -221,6 +221,7 @@ static void recover_inode(struct inode *inode, struct page *page)
13080     inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
13081    
13082     F2FS_I(inode)->i_advise = raw->i_advise;
13083     + F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
13084    
13085     recover_inline_flags(inode, raw);
13086    
13087     @@ -697,11 +698,15 @@ skip:
13088     /* let's drop all the directory inodes for clean checkpoint */
13089     destroy_fsync_dnodes(&dir_list);
13090    
13091     - if (!err && need_writecp) {
13092     - struct cp_control cpc = {
13093     - .reason = CP_RECOVERY,
13094     - };
13095     - err = f2fs_write_checkpoint(sbi, &cpc);
13096     + if (need_writecp) {
13097     + set_sbi_flag(sbi, SBI_IS_RECOVERED);
13098     +
13099     + if (!err) {
13100     + struct cp_control cpc = {
13101     + .reason = CP_RECOVERY,
13102     + };
13103     + err = f2fs_write_checkpoint(sbi, &cpc);
13104     + }
13105     }
13106    
13107     kmem_cache_destroy(fsync_entry_slab);
13108     diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
13109     index 896b885f504e..287c9fe9fff9 100644
13110     --- a/fs/f2fs/super.c
13111     +++ b/fs/f2fs/super.c
13112     @@ -1852,7 +1852,9 @@ static int f2fs_quota_off(struct super_block *sb, int type)
13113     if (!inode || !igrab(inode))
13114     return dquot_quota_off(sb, type);
13115    
13116     - f2fs_quota_sync(sb, type);
13117     + err = f2fs_quota_sync(sb, type);
13118     + if (err)
13119     + goto out_put;
13120    
13121     err = dquot_quota_off(sb, type);
13122     if (err || f2fs_sb_has_quota_ino(sb))
13123     @@ -1871,9 +1873,20 @@ out_put:
13124     void f2fs_quota_off_umount(struct super_block *sb)
13125     {
13126     int type;
13127     + int err;
13128    
13129     - for (type = 0; type < MAXQUOTAS; type++)
13130     - f2fs_quota_off(sb, type);
13131     + for (type = 0; type < MAXQUOTAS; type++) {
13132     + err = f2fs_quota_off(sb, type);
13133     + if (err) {
13134     + int ret = dquot_quota_off(sb, type);
13135     +
13136     + f2fs_msg(sb, KERN_ERR,
13137     + "Fail to turn off disk quota "
13138     + "(type: %d, err: %d, ret:%d), Please "
13139     + "run fsck to fix it.", type, err, ret);
13140     + set_sbi_flag(F2FS_SB(sb), SBI_NEED_FSCK);
13141     + }
13142     + }
13143     }
13144    
13145     static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
13146     @@ -3175,6 +3188,9 @@ static void kill_f2fs_super(struct super_block *sb)
13147     };
13148     f2fs_write_checkpoint(sbi, &cpc);
13149     }
13150     +
13151     + if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
13152     + sb->s_flags &= ~SB_RDONLY;
13153     }
13154     kill_block_super(sb);
13155     }
13156     diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
13157     index c2469833b4fb..6b84ef6ccff3 100644
13158     --- a/fs/gfs2/ops_fstype.c
13159     +++ b/fs/gfs2/ops_fstype.c
13160     @@ -1333,6 +1333,9 @@ static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
13161     struct path path;
13162     int error;
13163    
13164     + if (!dev_name || !*dev_name)
13165     + return ERR_PTR(-EINVAL);
13166     +
13167     error = kern_path(dev_name, LOOKUP_FOLLOW, &path);
13168     if (error) {
13169     pr_warn("path_lookup on %s returned error %d\n",
13170     diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
13171     index c125d662777c..26f8d7e46462 100644
13172     --- a/fs/jbd2/checkpoint.c
13173     +++ b/fs/jbd2/checkpoint.c
13174     @@ -251,8 +251,8 @@ restart:
13175     bh = jh2bh(jh);
13176    
13177     if (buffer_locked(bh)) {
13178     - spin_unlock(&journal->j_list_lock);
13179     get_bh(bh);
13180     + spin_unlock(&journal->j_list_lock);
13181     wait_on_buffer(bh);
13182     /* the journal_head may have gone by now */
13183     BUFFER_TRACE(bh, "brelse");
13184     @@ -333,8 +333,8 @@ restart2:
13185     jh = transaction->t_checkpoint_io_list;
13186     bh = jh2bh(jh);
13187     if (buffer_locked(bh)) {
13188     - spin_unlock(&journal->j_list_lock);
13189     get_bh(bh);
13190     + spin_unlock(&journal->j_list_lock);
13191     wait_on_buffer(bh);
13192     /* the journal_head may have gone by now */
13193     BUFFER_TRACE(bh, "brelse");
13194     diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
13195     index 87bdf0f4cba1..902a7dd10e5c 100644
13196     --- a/fs/jffs2/super.c
13197     +++ b/fs/jffs2/super.c
13198     @@ -285,10 +285,8 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
13199     sb->s_fs_info = c;
13200    
13201     ret = jffs2_parse_options(c, data);
13202     - if (ret) {
13203     - kfree(c);
13204     + if (ret)
13205     return -EINVAL;
13206     - }
13207    
13208     /* Initialize JFFS2 superblock locks, the further initialization will
13209     * be done later */
13210     diff --git a/fs/lockd/host.c b/fs/lockd/host.c
13211     index d35cd6be0675..93fb7cf0b92b 100644
13212     --- a/fs/lockd/host.c
13213     +++ b/fs/lockd/host.c
13214     @@ -341,7 +341,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
13215     };
13216     struct lockd_net *ln = net_generic(net, lockd_net_id);
13217    
13218     - dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__,
13219     + dprintk("lockd: %s(host='%.*s', vers=%u, proto=%s)\n", __func__,
13220     (int)hostname_len, hostname, rqstp->rq_vers,
13221     (rqstp->rq_prot == IPPROTO_UDP ? "udp" : "tcp"));
13222    
13223     diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
13224     index 146e30862234..8f53455c4765 100644
13225     --- a/fs/nfs/nfs4client.c
13226     +++ b/fs/nfs/nfs4client.c
13227     @@ -950,10 +950,10 @@ EXPORT_SYMBOL_GPL(nfs4_set_ds_client);
13228    
13229     /*
13230     * Session has been established, and the client marked ready.
13231     - * Set the mount rsize and wsize with negotiated fore channel
13232     - * attributes which will be bound checked in nfs_server_set_fsinfo.
13233     + * Limit the mount rsize, wsize and dtsize using negotiated fore
13234     + * channel attributes.
13235     */
13236     -static void nfs4_session_set_rwsize(struct nfs_server *server)
13237     +static void nfs4_session_limit_rwsize(struct nfs_server *server)
13238     {
13239     #ifdef CONFIG_NFS_V4_1
13240     struct nfs4_session *sess;
13241     @@ -966,9 +966,11 @@ static void nfs4_session_set_rwsize(struct nfs_server *server)
13242     server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
13243     server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
13244    
13245     - if (!server->rsize || server->rsize > server_resp_sz)
13246     + if (server->dtsize > server_resp_sz)
13247     + server->dtsize = server_resp_sz;
13248     + if (server->rsize > server_resp_sz)
13249     server->rsize = server_resp_sz;
13250     - if (!server->wsize || server->wsize > server_rqst_sz)
13251     + if (server->wsize > server_rqst_sz)
13252     server->wsize = server_rqst_sz;
13253     #endif /* CONFIG_NFS_V4_1 */
13254     }
13255     @@ -1015,12 +1017,12 @@ static int nfs4_server_common_setup(struct nfs_server *server,
13256     (unsigned long long) server->fsid.minor);
13257     nfs_display_fhandle(mntfh, "Pseudo-fs root FH");
13258    
13259     - nfs4_session_set_rwsize(server);
13260     -
13261     error = nfs_probe_fsinfo(server, mntfh, fattr);
13262     if (error < 0)
13263     goto out;
13264    
13265     + nfs4_session_limit_rwsize(server);
13266     +
13267     if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
13268     server->namelen = NFS4_MAXNAMLEN;
13269    
13270     diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
13271     index bb5476a6d264..3dbd15b47c27 100644
13272     --- a/fs/nfs/pagelist.c
13273     +++ b/fs/nfs/pagelist.c
13274     @@ -1111,6 +1111,20 @@ static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
13275     return ret;
13276     }
13277    
13278     +static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
13279     +{
13280     + u32 midx;
13281     + struct nfs_pgio_mirror *mirror;
13282     +
13283     + if (!desc->pg_error)
13284     + return;
13285     +
13286     + for (midx = 0; midx < desc->pg_mirror_count; midx++) {
13287     + mirror = &desc->pg_mirrors[midx];
13288     + desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
13289     + }
13290     +}
13291     +
13292     int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
13293     struct nfs_page *req)
13294     {
13295     @@ -1161,25 +1175,11 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
13296     return 1;
13297    
13298     out_failed:
13299     - /*
13300     - * We might have failed before sending any reqs over wire.
13301     - * Clean up rest of the reqs in mirror pg_list.
13302     - */
13303     - if (desc->pg_error) {
13304     - struct nfs_pgio_mirror *mirror;
13305     - void (*func)(struct list_head *);
13306     -
13307     - /* remember fatal errors */
13308     - if (nfs_error_is_fatal(desc->pg_error))
13309     - nfs_context_set_write_error(req->wb_context,
13310     - desc->pg_error);
13311     -
13312     - func = desc->pg_completion_ops->error_cleanup;
13313     - for (midx = 0; midx < desc->pg_mirror_count; midx++) {
13314     - mirror = &desc->pg_mirrors[midx];
13315     - func(&mirror->pg_list);
13316     - }
13317     - }
13318     + /* remember fatal errors */
13319     + if (nfs_error_is_fatal(desc->pg_error))
13320     + nfs_context_set_write_error(req->wb_context,
13321     + desc->pg_error);
13322     + nfs_pageio_error_cleanup(desc);
13323     return 0;
13324     }
13325    
13326     @@ -1251,6 +1251,8 @@ void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
13327     for (midx = 0; midx < desc->pg_mirror_count; midx++)
13328     nfs_pageio_complete_mirror(desc, midx);
13329    
13330     + if (desc->pg_error < 0)
13331     + nfs_pageio_error_cleanup(desc);
13332     if (desc->pg_ops->pg_cleanup)
13333     desc->pg_ops->pg_cleanup(desc);
13334     nfs_pageio_cleanup_mirroring(desc);
13335     diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
13336     index b0ca0efd2875..9c6d1d57b598 100644
13337     --- a/fs/nfsd/nfs4state.c
13338     +++ b/fs/nfsd/nfs4state.c
13339     @@ -4364,7 +4364,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
13340    
13341     fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
13342     if (!fl)
13343     - goto out_stid;
13344     + goto out_clnt_odstate;
13345    
13346     status = vfs_setlease(fp->fi_deleg_file, fl->fl_type, &fl, NULL);
13347     if (fl)
13348     @@ -4389,7 +4389,6 @@ out_unlock:
13349     vfs_setlease(fp->fi_deleg_file, F_UNLCK, NULL, (void **)&dp);
13350     out_clnt_odstate:
13351     put_clnt_odstate(dp->dl_clnt_odstate);
13352     -out_stid:
13353     nfs4_put_stid(&dp->dl_stid);
13354     out_delegees:
13355     put_deleg_file(fp);
13356     diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
13357     index ababdbfab537..f43ea1aad542 100644
13358     --- a/fs/notify/fsnotify.c
13359     +++ b/fs/notify/fsnotify.c
13360     @@ -96,6 +96,9 @@ void fsnotify_unmount_inodes(struct super_block *sb)
13361    
13362     if (iput_inode)
13363     iput(iput_inode);
13364     + /* Wait for outstanding inode references from connectors */
13365     + wait_var_event(&sb->s_fsnotify_inode_refs,
13366     + !atomic_long_read(&sb->s_fsnotify_inode_refs));
13367     }
13368    
13369     /*
13370     diff --git a/fs/notify/mark.c b/fs/notify/mark.c
13371     index 59cdb27826de..09535f6423fc 100644
13372     --- a/fs/notify/mark.c
13373     +++ b/fs/notify/mark.c
13374     @@ -179,17 +179,20 @@ static void fsnotify_connector_destroy_workfn(struct work_struct *work)
13375     }
13376     }
13377    
13378     -static struct inode *fsnotify_detach_connector_from_object(
13379     - struct fsnotify_mark_connector *conn)
13380     +static void *fsnotify_detach_connector_from_object(
13381     + struct fsnotify_mark_connector *conn,
13382     + unsigned int *type)
13383     {
13384     struct inode *inode = NULL;
13385    
13386     + *type = conn->type;
13387     if (conn->type == FSNOTIFY_OBJ_TYPE_DETACHED)
13388     return NULL;
13389    
13390     if (conn->type == FSNOTIFY_OBJ_TYPE_INODE) {
13391     inode = fsnotify_conn_inode(conn);
13392     inode->i_fsnotify_mask = 0;
13393     + atomic_long_inc(&inode->i_sb->s_fsnotify_inode_refs);
13394     } else if (conn->type == FSNOTIFY_OBJ_TYPE_VFSMOUNT) {
13395     fsnotify_conn_mount(conn)->mnt_fsnotify_mask = 0;
13396     }
13397     @@ -211,10 +214,29 @@ static void fsnotify_final_mark_destroy(struct fsnotify_mark *mark)
13398     fsnotify_put_group(group);
13399     }
13400    
13401     +/* Drop object reference originally held by a connector */
13402     +static void fsnotify_drop_object(unsigned int type, void *objp)
13403     +{
13404     + struct inode *inode;
13405     + struct super_block *sb;
13406     +
13407     + if (!objp)
13408     + return;
13409     + /* Currently only inode references are passed to be dropped */
13410     + if (WARN_ON_ONCE(type != FSNOTIFY_OBJ_TYPE_INODE))
13411     + return;
13412     + inode = objp;
13413     + sb = inode->i_sb;
13414     + iput(inode);
13415     + if (atomic_long_dec_and_test(&sb->s_fsnotify_inode_refs))
13416     + wake_up_var(&sb->s_fsnotify_inode_refs);
13417     +}
13418     +
13419     void fsnotify_put_mark(struct fsnotify_mark *mark)
13420     {
13421     struct fsnotify_mark_connector *conn;
13422     - struct inode *inode = NULL;
13423     + void *objp = NULL;
13424     + unsigned int type = FSNOTIFY_OBJ_TYPE_DETACHED;
13425     bool free_conn = false;
13426    
13427     /* Catch marks that were actually never attached to object */
13428     @@ -234,7 +256,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
13429     conn = mark->connector;
13430     hlist_del_init_rcu(&mark->obj_list);
13431     if (hlist_empty(&conn->list)) {
13432     - inode = fsnotify_detach_connector_from_object(conn);
13433     + objp = fsnotify_detach_connector_from_object(conn, &type);
13434     free_conn = true;
13435     } else {
13436     __fsnotify_recalc_mask(conn);
13437     @@ -242,7 +264,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
13438     mark->connector = NULL;
13439     spin_unlock(&conn->lock);
13440    
13441     - iput(inode);
13442     + fsnotify_drop_object(type, objp);
13443    
13444     if (free_conn) {
13445     spin_lock(&destroy_lock);
13446     @@ -709,7 +731,8 @@ void fsnotify_destroy_marks(fsnotify_connp_t *connp)
13447     {
13448     struct fsnotify_mark_connector *conn;
13449     struct fsnotify_mark *mark, *old_mark = NULL;
13450     - struct inode *inode;
13451     + void *objp;
13452     + unsigned int type;
13453    
13454     conn = fsnotify_grab_connector(connp);
13455     if (!conn)
13456     @@ -735,11 +758,11 @@ void fsnotify_destroy_marks(fsnotify_connp_t *connp)
13457     * mark references get dropped. It would lead to strange results such
13458     * as delaying inode deletion or blocking unmount.
13459     */
13460     - inode = fsnotify_detach_connector_from_object(conn);
13461     + objp = fsnotify_detach_connector_from_object(conn, &type);
13462     spin_unlock(&conn->lock);
13463     if (old_mark)
13464     fsnotify_put_mark(old_mark);
13465     - iput(inode);
13466     + fsnotify_drop_object(type, objp);
13467     }
13468    
13469     /*
13470     diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
13471     index 5ea1d64cb0b4..a027473561c6 100644
13472     --- a/fs/proc/task_mmu.c
13473     +++ b/fs/proc/task_mmu.c
13474     @@ -713,6 +713,8 @@ static void smap_gather_stats(struct vm_area_struct *vma,
13475     smaps_walk.private = mss;
13476    
13477     #ifdef CONFIG_SHMEM
13478     + /* In case of smaps_rollup, reset the value from previous vma */
13479     + mss->check_shmem_swap = false;
13480     if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
13481     /*
13482     * For shared or readonly shmem mappings we know that all
13483     @@ -728,7 +730,7 @@ static void smap_gather_stats(struct vm_area_struct *vma,
13484    
13485     if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
13486     !(vma->vm_flags & VM_WRITE)) {
13487     - mss->swap = shmem_swapped;
13488     + mss->swap += shmem_swapped;
13489     } else {
13490     mss->check_shmem_swap = true;
13491     smaps_walk.pte_hole = smaps_pte_hole;
13492     diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
13493     index bfa0ec69f924..356d2b8568c1 100644
13494     --- a/fs/userfaultfd.c
13495     +++ b/fs/userfaultfd.c
13496     @@ -1026,7 +1026,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
13497     struct userfaultfd_ctx *fork_nctx = NULL;
13498    
13499     /* always take the fd_wqh lock before the fault_pending_wqh lock */
13500     - spin_lock(&ctx->fd_wqh.lock);
13501     + spin_lock_irq(&ctx->fd_wqh.lock);
13502     __add_wait_queue(&ctx->fd_wqh, &wait);
13503     for (;;) {
13504     set_current_state(TASK_INTERRUPTIBLE);
13505     @@ -1112,13 +1112,13 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
13506     ret = -EAGAIN;
13507     break;
13508     }
13509     - spin_unlock(&ctx->fd_wqh.lock);
13510     + spin_unlock_irq(&ctx->fd_wqh.lock);
13511     schedule();
13512     - spin_lock(&ctx->fd_wqh.lock);
13513     + spin_lock_irq(&ctx->fd_wqh.lock);
13514     }
13515     __remove_wait_queue(&ctx->fd_wqh, &wait);
13516     __set_current_state(TASK_RUNNING);
13517     - spin_unlock(&ctx->fd_wqh.lock);
13518     + spin_unlock_irq(&ctx->fd_wqh.lock);
13519    
13520     if (!ret && msg->event == UFFD_EVENT_FORK) {
13521     ret = resolve_userfault_fork(ctx, fork_nctx, msg);
13522     diff --git a/include/crypto/speck.h b/include/crypto/speck.h
13523     deleted file mode 100644
13524     index 73cfc952d405..000000000000
13525     --- a/include/crypto/speck.h
13526     +++ /dev/null
13527     @@ -1,62 +0,0 @@
13528     -// SPDX-License-Identifier: GPL-2.0
13529     -/*
13530     - * Common values for the Speck algorithm
13531     - */
13532     -
13533     -#ifndef _CRYPTO_SPECK_H
13534     -#define _CRYPTO_SPECK_H
13535     -
13536     -#include <linux/types.h>
13537     -
13538     -/* Speck128 */
13539     -
13540     -#define SPECK128_BLOCK_SIZE 16
13541     -
13542     -#define SPECK128_128_KEY_SIZE 16
13543     -#define SPECK128_128_NROUNDS 32
13544     -
13545     -#define SPECK128_192_KEY_SIZE 24
13546     -#define SPECK128_192_NROUNDS 33
13547     -
13548     -#define SPECK128_256_KEY_SIZE 32
13549     -#define SPECK128_256_NROUNDS 34
13550     -
13551     -struct speck128_tfm_ctx {
13552     - u64 round_keys[SPECK128_256_NROUNDS];
13553     - int nrounds;
13554     -};
13555     -
13556     -void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
13557     - u8 *out, const u8 *in);
13558     -
13559     -void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
13560     - u8 *out, const u8 *in);
13561     -
13562     -int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
13563     - unsigned int keysize);
13564     -
13565     -/* Speck64 */
13566     -
13567     -#define SPECK64_BLOCK_SIZE 8
13568     -
13569     -#define SPECK64_96_KEY_SIZE 12
13570     -#define SPECK64_96_NROUNDS 26
13571     -
13572     -#define SPECK64_128_KEY_SIZE 16
13573     -#define SPECK64_128_NROUNDS 27
13574     -
13575     -struct speck64_tfm_ctx {
13576     - u32 round_keys[SPECK64_128_NROUNDS];
13577     - int nrounds;
13578     -};
13579     -
13580     -void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
13581     - u8 *out, const u8 *in);
13582     -
13583     -void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
13584     - u8 *out, const u8 *in);
13585     -
13586     -int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
13587     - unsigned int keysize);
13588     -
13589     -#endif /* _CRYPTO_SPECK_H */
13590     diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
13591     index 38b04f559ad3..1fd6fa822d2c 100644
13592     --- a/include/linux/bpf_verifier.h
13593     +++ b/include/linux/bpf_verifier.h
13594     @@ -50,6 +50,9 @@ struct bpf_reg_state {
13595     * PTR_TO_MAP_VALUE_OR_NULL
13596     */
13597     struct bpf_map *map_ptr;
13598     +
13599     + /* Max size from any of the above. */
13600     + unsigned long raw;
13601     };
13602     /* Fixed part of pointer offset, pointer types only */
13603     s32 off;
13604     diff --git a/include/linux/compat.h b/include/linux/compat.h
13605     index 1a3c4f37e908..de0c13bdcd2c 100644
13606     --- a/include/linux/compat.h
13607     +++ b/include/linux/compat.h
13608     @@ -103,6 +103,9 @@ typedef struct compat_sigaltstack {
13609     compat_size_t ss_size;
13610     } compat_stack_t;
13611     #endif
13612     +#ifndef COMPAT_MINSIGSTKSZ
13613     +#define COMPAT_MINSIGSTKSZ MINSIGSTKSZ
13614     +#endif
13615    
13616     #define compat_jiffies_to_clock_t(x) \
13617     (((unsigned long)(x) * COMPAT_USER_HZ) / HZ)
13618     diff --git a/include/linux/fs.h b/include/linux/fs.h
13619     index 897eae8faee1..7b6084854bfe 100644
13620     --- a/include/linux/fs.h
13621     +++ b/include/linux/fs.h
13622     @@ -1428,6 +1428,9 @@ struct super_block {
13623     /* Number of inodes with nlink == 0 but still referenced */
13624     atomic_long_t s_remove_count;
13625    
13626     + /* Pending fsnotify inode refs */
13627     + atomic_long_t s_fsnotify_inode_refs;
13628     +
13629     /* Being remounted read-only */
13630     int s_readonly_remount;
13631    
13632     diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
13633     index d271ff23984f..4f3febc0f971 100644
13634     --- a/include/linux/hdmi.h
13635     +++ b/include/linux/hdmi.h
13636     @@ -101,8 +101,8 @@ enum hdmi_extended_colorimetry {
13637     HDMI_EXTENDED_COLORIMETRY_XV_YCC_601,
13638     HDMI_EXTENDED_COLORIMETRY_XV_YCC_709,
13639     HDMI_EXTENDED_COLORIMETRY_S_YCC_601,
13640     - HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601,
13641     - HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB,
13642     + HDMI_EXTENDED_COLORIMETRY_OPYCC_601,
13643     + HDMI_EXTENDED_COLORIMETRY_OPRGB,
13644    
13645     /* The following EC values are only defined in CEA-861-F. */
13646     HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM,
13647     diff --git a/include/linux/nvme.h b/include/linux/nvme.h
13648     index 68e91ef5494c..818dbe9331be 100644
13649     --- a/include/linux/nvme.h
13650     +++ b/include/linux/nvme.h
13651     @@ -1241,6 +1241,7 @@ enum {
13652     NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
13653     NVME_SC_ANA_INACCESSIBLE = 0x302,
13654     NVME_SC_ANA_TRANSITION = 0x303,
13655     + NVME_SC_HOST_PATH_ERROR = 0x370,
13656    
13657     NVME_SC_DNR = 0x4000,
13658     };
13659     diff --git a/include/linux/signal.h b/include/linux/signal.h
13660     index 3d4cd5db30a9..e4d01469ed60 100644
13661     --- a/include/linux/signal.h
13662     +++ b/include/linux/signal.h
13663     @@ -36,7 +36,7 @@ enum siginfo_layout {
13664     SIL_SYS,
13665     };
13666    
13667     -enum siginfo_layout siginfo_layout(int sig, int si_code);
13668     +enum siginfo_layout siginfo_layout(unsigned sig, int si_code);
13669    
13670     /*
13671     * Define some primitives to manipulate sigset_t.
13672     diff --git a/include/linux/tc.h b/include/linux/tc.h
13673     index f92511e57cdb..a60639f37963 100644
13674     --- a/include/linux/tc.h
13675     +++ b/include/linux/tc.h
13676     @@ -84,6 +84,7 @@ struct tc_dev {
13677     device. */
13678     struct device dev; /* Generic device interface. */
13679     struct resource resource; /* Address space of this device. */
13680     + u64 dma_mask; /* DMA addressable range. */
13681     char vendor[9];
13682     char name[9];
13683     char firmware[9];
13684     diff --git a/include/linux/wait.h b/include/linux/wait.h
13685     index d9f131ecf708..ed7c122cb31f 100644
13686     --- a/include/linux/wait.h
13687     +++ b/include/linux/wait.h
13688     @@ -1052,10 +1052,9 @@ do { \
13689     __ret; \
13690     })
13691    
13692     -#define __wait_event_interruptible_lock_irq_timeout(wq_head, condition, \
13693     - lock, timeout) \
13694     +#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
13695     ___wait_event(wq_head, ___wait_cond_timeout(condition), \
13696     - TASK_INTERRUPTIBLE, 0, timeout, \
13697     + state, 0, timeout, \
13698     spin_unlock_irq(&lock); \
13699     __ret = schedule_timeout(__ret); \
13700     spin_lock_irq(&lock));
13701     @@ -1089,8 +1088,19 @@ do { \
13702     ({ \
13703     long __ret = timeout; \
13704     if (!___wait_cond_timeout(condition)) \
13705     - __ret = __wait_event_interruptible_lock_irq_timeout( \
13706     - wq_head, condition, lock, timeout); \
13707     + __ret = __wait_event_lock_irq_timeout( \
13708     + wq_head, condition, lock, timeout, \
13709     + TASK_INTERRUPTIBLE); \
13710     + __ret; \
13711     +})
13712     +
13713     +#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout) \
13714     +({ \
13715     + long __ret = timeout; \
13716     + if (!___wait_cond_timeout(condition)) \
13717     + __ret = __wait_event_lock_irq_timeout( \
13718     + wq_head, condition, lock, timeout, \
13719     + TASK_UNINTERRUPTIBLE); \
13720     __ret; \
13721     })
13722    
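
The hunk above generalizes the old interruptible-only helper into __wait_event_lock_irq_timeout() with an explicit task state, and adds an uninterruptible wait_event_lock_irq_timeout() wrapper on top of it. A minimal usage sketch with a hypothetical driver lock, wait queue, and flag (none of these names come from the patch): the macro must be entered with the spinlock held, drops and reacquires it around the sleep, and returns the remaining jiffies or 0 on timeout.

/* Illustrative sketch only -- not part of the patch; all names are hypothetical. */
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(demo_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static bool demo_ready;

static int demo_wait_for_ready(void)
{
        long left;

        spin_lock_irq(&demo_lock);
        /* Sleeps in TASK_UNINTERRUPTIBLE, releasing demo_lock while waiting. */
        left = wait_event_lock_irq_timeout(demo_wq, demo_ready, demo_lock,
                                           msecs_to_jiffies(500));
        spin_unlock_irq(&demo_lock);

        return left ? 0 : -ETIMEDOUT;
}
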
13723     diff --git a/include/media/cec.h b/include/media/cec.h
13724     index ff9847f7f99d..9b7394a74dca 100644
13725     --- a/include/media/cec.h
13726     +++ b/include/media/cec.h
13727     @@ -63,7 +63,6 @@ struct cec_data {
13728     struct delayed_work work;
13729     struct completion c;
13730     u8 attempts;
13731     - bool new_initiator;
13732     bool blocking;
13733     bool completed;
13734     };
13735     @@ -174,6 +173,7 @@ struct cec_adapter {
13736     bool is_configuring;
13737     bool is_configured;
13738     bool cec_pin_is_high;
13739     + u8 last_initiator;
13740     u32 monitor_all_cnt;
13741     u32 monitor_pin_cnt;
13742     u32 follower_cnt;
13743     @@ -461,4 +461,74 @@ static inline void cec_phys_addr_invalidate(struct cec_adapter *adap)
13744     cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false);
13745     }
13746    
13747     +/**
13748     + * cec_get_edid_spa_location() - find location of the Source Physical Address
13749     + *
13750     + * @edid: the EDID
13751     + * @size: the size of the EDID
13752     + *
13753     + * This EDID is expected to be CEA-861 compliant, which means that there are
13754     + * at least two blocks and one or more of the extension blocks are CEA-861
13755     + * blocks.
13756     + *
13757     + * The returned location is guaranteed to be <= size-2.
13758     + *
13759     + * This is an inline function since it is used by both CEC and V4L2.
13760     + * Ideally this would go in a module shared by both, but it is overkill to do
13761     + * that for just a single function.
13762     + */
13763     +static inline unsigned int cec_get_edid_spa_location(const u8 *edid,
13764     + unsigned int size)
13765     +{
13766     + unsigned int blocks = size / 128;
13767     + unsigned int block;
13768     + u8 d;
13769     +
13770     + /* Sanity check: at least 2 blocks and a multiple of the block size */
13771     + if (blocks < 2 || size % 128)
13772     + return 0;
13773     +
13774     + /*
13775     + * If there are fewer extension blocks than the size, then update
13776     + * 'blocks'. It is allowed to have more extension blocks than the size,
13777     + * since some hardware can only read e.g. 256 bytes of the EDID, even
13778     + * though more blocks are present. The first CEA-861 extension block
13779     + * should normally be in block 1 anyway.
13780     + */
13781     + if (edid[0x7e] + 1 < blocks)
13782     + blocks = edid[0x7e] + 1;
13783     +
13784     + for (block = 1; block < blocks; block++) {
13785     + unsigned int offset = block * 128;
13786     +
13787     + /* Skip any non-CEA-861 extension blocks */
13788     + if (edid[offset] != 0x02 || edid[offset + 1] != 0x03)
13789     + continue;
13790     +
13791     + /* search Vendor Specific Data Block (tag 3) */
13792     + d = edid[offset + 2] & 0x7f;
13793     + /* Check if there are Data Blocks */
13794     + if (d <= 4)
13795     + continue;
13796     + if (d > 4) {
13797     + unsigned int i = offset + 4;
13798     + unsigned int end = offset + d;
13799     +
13800     + /* Note: 'end' is always < 'size' */
13801     + do {
13802     + u8 tag = edid[i] >> 5;
13803     + u8 len = edid[i] & 0x1f;
13804     +
13805     + if (tag == 3 && len >= 5 && i + len <= end &&
13806     + edid[i + 1] == 0x03 &&
13807     + edid[i + 2] == 0x0c &&
13808     + edid[i + 3] == 0x00)
13809     + return i + 4;
13810     + i += len + 1;
13811     + } while (i < end);
13812     + }
13813     + }
13814     + return 0;
13815     +}
13816     +
13817     #endif /* _MEDIA_CEC_H */
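
The new cec_get_edid_spa_location() helper above returns the byte offset of the Source Physical Address inside a CEA-861 EDID, or 0 when none is found, and guarantees that the offset is at most size - 2. A hedged sketch of how a caller holding a raw EDID buffer might use it (the wrapper function is hypothetical, not from the patch):

/* Illustrative sketch only -- hypothetical caller, not part of the patch. */
#include <media/cec.h>

static u16 demo_edid_phys_addr(const u8 *edid, unsigned int size)
{
        unsigned int loc = cec_get_edid_spa_location(edid, size);

        if (!loc)                       /* no CEA-861 Source Physical Address found */
                return CEC_PHYS_ADDR_INVALID;

        /* loc <= size - 2 is guaranteed, so reading two bytes is in bounds. */
        return (edid[loc] << 8) | edid[loc + 1];
}
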
13818     diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
13819     index e950c2a68f06..ec299fcf55f7 100644
13820     --- a/include/rdma/ib_verbs.h
13821     +++ b/include/rdma/ib_verbs.h
13822     @@ -1278,21 +1278,27 @@ struct ib_qp_attr {
13823     };
13824    
13825     enum ib_wr_opcode {
13826     - IB_WR_RDMA_WRITE,
13827     - IB_WR_RDMA_WRITE_WITH_IMM,
13828     - IB_WR_SEND,
13829     - IB_WR_SEND_WITH_IMM,
13830     - IB_WR_RDMA_READ,
13831     - IB_WR_ATOMIC_CMP_AND_SWP,
13832     - IB_WR_ATOMIC_FETCH_AND_ADD,
13833     - IB_WR_LSO,
13834     - IB_WR_SEND_WITH_INV,
13835     - IB_WR_RDMA_READ_WITH_INV,
13836     - IB_WR_LOCAL_INV,
13837     - IB_WR_REG_MR,
13838     - IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
13839     - IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
13840     + /* These are shared with userspace */
13841     + IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
13842     + IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
13843     + IB_WR_SEND = IB_UVERBS_WR_SEND,
13844     + IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
13845     + IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
13846     + IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
13847     + IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
13848     + IB_WR_LSO = IB_UVERBS_WR_TSO,
13849     + IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
13850     + IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
13851     + IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
13852     + IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
13853     + IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
13854     + IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
13855     + IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
13856     +
13857     + /* These are kernel only and can not be issued by userspace */
13858     + IB_WR_REG_MR = 0x20,
13859     IB_WR_REG_SIG_MR,
13860     +
13861     /* reserve values for low level drivers' internal use.
13862     * These values will not be used at all in the ib core layer.
13863     */
13864     diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
13865     index 097fcd812471..3094af68b6e7 100644
13866     --- a/include/uapi/linux/cec.h
13867     +++ b/include/uapi/linux/cec.h
13868     @@ -152,10 +152,13 @@ static inline void cec_msg_set_reply_to(struct cec_msg *msg,
13869     #define CEC_TX_STATUS_LOW_DRIVE (1 << 3)
13870     #define CEC_TX_STATUS_ERROR (1 << 4)
13871     #define CEC_TX_STATUS_MAX_RETRIES (1 << 5)
13872     +#define CEC_TX_STATUS_ABORTED (1 << 6)
13873     +#define CEC_TX_STATUS_TIMEOUT (1 << 7)
13874    
13875     #define CEC_RX_STATUS_OK (1 << 0)
13876     #define CEC_RX_STATUS_TIMEOUT (1 << 1)
13877     #define CEC_RX_STATUS_FEATURE_ABORT (1 << 2)
13878     +#define CEC_RX_STATUS_ABORTED (1 << 3)
13879    
13880     static inline int cec_msg_status_is_ok(const struct cec_msg *msg)
13881     {
13882     diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
13883     index 73e01918f996..a441ea1bfe6d 100644
13884     --- a/include/uapi/linux/fs.h
13885     +++ b/include/uapi/linux/fs.h
13886     @@ -279,8 +279,8 @@ struct fsxattr {
13887     #define FS_ENCRYPTION_MODE_AES_256_CTS 4
13888     #define FS_ENCRYPTION_MODE_AES_128_CBC 5
13889     #define FS_ENCRYPTION_MODE_AES_128_CTS 6
13890     -#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7
13891     -#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8
13892     +#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */
13893     +#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */
13894    
13895     struct fscrypt_policy {
13896     __u8 version;
13897     diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
13898     index 7e27070b9440..2f2c43d633c5 100644
13899     --- a/include/uapi/linux/ndctl.h
13900     +++ b/include/uapi/linux/ndctl.h
13901     @@ -128,37 +128,31 @@ enum {
13902    
13903     static inline const char *nvdimm_bus_cmd_name(unsigned cmd)
13904     {
13905     - static const char * const names[] = {
13906     - [ND_CMD_ARS_CAP] = "ars_cap",
13907     - [ND_CMD_ARS_START] = "ars_start",
13908     - [ND_CMD_ARS_STATUS] = "ars_status",
13909     - [ND_CMD_CLEAR_ERROR] = "clear_error",
13910     - [ND_CMD_CALL] = "cmd_call",
13911     - };
13912     -
13913     - if (cmd < ARRAY_SIZE(names) && names[cmd])
13914     - return names[cmd];
13915     - return "unknown";
13916     + switch (cmd) {
13917     + case ND_CMD_ARS_CAP: return "ars_cap";
13918     + case ND_CMD_ARS_START: return "ars_start";
13919     + case ND_CMD_ARS_STATUS: return "ars_status";
13920     + case ND_CMD_CLEAR_ERROR: return "clear_error";
13921     + case ND_CMD_CALL: return "cmd_call";
13922     + default: return "unknown";
13923     + }
13924     }
13925    
13926     static inline const char *nvdimm_cmd_name(unsigned cmd)
13927     {
13928     - static const char * const names[] = {
13929     - [ND_CMD_SMART] = "smart",
13930     - [ND_CMD_SMART_THRESHOLD] = "smart_thresh",
13931     - [ND_CMD_DIMM_FLAGS] = "flags",
13932     - [ND_CMD_GET_CONFIG_SIZE] = "get_size",
13933     - [ND_CMD_GET_CONFIG_DATA] = "get_data",
13934     - [ND_CMD_SET_CONFIG_DATA] = "set_data",
13935     - [ND_CMD_VENDOR_EFFECT_LOG_SIZE] = "effect_size",
13936     - [ND_CMD_VENDOR_EFFECT_LOG] = "effect_log",
13937     - [ND_CMD_VENDOR] = "vendor",
13938     - [ND_CMD_CALL] = "cmd_call",
13939     - };
13940     -
13941     - if (cmd < ARRAY_SIZE(names) && names[cmd])
13942     - return names[cmd];
13943     - return "unknown";
13944     + switch (cmd) {
13945     + case ND_CMD_SMART: return "smart";
13946     + case ND_CMD_SMART_THRESHOLD: return "smart_thresh";
13947     + case ND_CMD_DIMM_FLAGS: return "flags";
13948     + case ND_CMD_GET_CONFIG_SIZE: return "get_size";
13949     + case ND_CMD_GET_CONFIG_DATA: return "get_data";
13950     + case ND_CMD_SET_CONFIG_DATA: return "set_data";
13951     + case ND_CMD_VENDOR_EFFECT_LOG_SIZE: return "effect_size";
13952     + case ND_CMD_VENDOR_EFFECT_LOG: return "effect_log";
13953     + case ND_CMD_VENDOR: return "vendor";
13954     + case ND_CMD_CALL: return "cmd_call";
13955     + default: return "unknown";
13956     + }
13957     }
13958    
13959     #define ND_IOCTL 'N'
13960     diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
13961     index 5d1a3685bea9..1aae2e4b8f10 100644
13962     --- a/include/uapi/linux/videodev2.h
13963     +++ b/include/uapi/linux/videodev2.h
13964     @@ -225,8 +225,8 @@ enum v4l2_colorspace {
13965     /* For RGB colorspaces such as produces by most webcams. */
13966     V4L2_COLORSPACE_SRGB = 8,
13967    
13968     - /* AdobeRGB colorspace */
13969     - V4L2_COLORSPACE_ADOBERGB = 9,
13970     + /* opRGB colorspace */
13971     + V4L2_COLORSPACE_OPRGB = 9,
13972    
13973     /* BT.2020 colorspace, used for UHDTV. */
13974     V4L2_COLORSPACE_BT2020 = 10,
13975     @@ -258,7 +258,7 @@ enum v4l2_xfer_func {
13976     *
13977     * V4L2_COLORSPACE_SRGB, V4L2_COLORSPACE_JPEG: V4L2_XFER_FUNC_SRGB
13978     *
13979     - * V4L2_COLORSPACE_ADOBERGB: V4L2_XFER_FUNC_ADOBERGB
13980     + * V4L2_COLORSPACE_OPRGB: V4L2_XFER_FUNC_OPRGB
13981     *
13982     * V4L2_COLORSPACE_SMPTE240M: V4L2_XFER_FUNC_SMPTE240M
13983     *
13984     @@ -269,7 +269,7 @@ enum v4l2_xfer_func {
13985     V4L2_XFER_FUNC_DEFAULT = 0,
13986     V4L2_XFER_FUNC_709 = 1,
13987     V4L2_XFER_FUNC_SRGB = 2,
13988     - V4L2_XFER_FUNC_ADOBERGB = 3,
13989     + V4L2_XFER_FUNC_OPRGB = 3,
13990     V4L2_XFER_FUNC_SMPTE240M = 4,
13991     V4L2_XFER_FUNC_NONE = 5,
13992     V4L2_XFER_FUNC_DCI_P3 = 6,
13993     @@ -281,7 +281,7 @@ enum v4l2_xfer_func {
13994     * This depends on the colorspace.
13995     */
13996     #define V4L2_MAP_XFER_FUNC_DEFAULT(colsp) \
13997     - ((colsp) == V4L2_COLORSPACE_ADOBERGB ? V4L2_XFER_FUNC_ADOBERGB : \
13998     + ((colsp) == V4L2_COLORSPACE_OPRGB ? V4L2_XFER_FUNC_OPRGB : \
13999     ((colsp) == V4L2_COLORSPACE_SMPTE240M ? V4L2_XFER_FUNC_SMPTE240M : \
14000     ((colsp) == V4L2_COLORSPACE_DCI_P3 ? V4L2_XFER_FUNC_DCI_P3 : \
14001     ((colsp) == V4L2_COLORSPACE_RAW ? V4L2_XFER_FUNC_NONE : \
14002     @@ -295,7 +295,7 @@ enum v4l2_ycbcr_encoding {
14003     *
14004     * V4L2_COLORSPACE_SMPTE170M, V4L2_COLORSPACE_470_SYSTEM_M,
14005     * V4L2_COLORSPACE_470_SYSTEM_BG, V4L2_COLORSPACE_SRGB,
14006     - * V4L2_COLORSPACE_ADOBERGB and V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601
14007     + * V4L2_COLORSPACE_OPRGB and V4L2_COLORSPACE_JPEG: V4L2_YCBCR_ENC_601
14008     *
14009     * V4L2_COLORSPACE_REC709 and V4L2_COLORSPACE_DCI_P3: V4L2_YCBCR_ENC_709
14010     *
14011     @@ -382,6 +382,17 @@ enum v4l2_quantization {
14012     (((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
14013     V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
14014    
14015     +/*
14016     + * Deprecated names for opRGB colorspace (IEC 61966-2-5)
14017     + *
14018     + * WARNING: Please don't use these deprecated defines in your code, as
14019     + * there is a chance we have to remove them in the future.
14020     + */
14021     +#ifndef __KERNEL__
14022     +#define V4L2_COLORSPACE_ADOBERGB V4L2_COLORSPACE_OPRGB
14023     +#define V4L2_XFER_FUNC_ADOBERGB V4L2_XFER_FUNC_OPRGB
14024     +#endif
14025     +
14026     enum v4l2_priority {
14027     V4L2_PRIORITY_UNSET = 0, /* not initialized */
14028     V4L2_PRIORITY_BACKGROUND = 1,
14029     diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
14030     index 25a16760de2a..1254b51a551a 100644
14031     --- a/include/uapi/rdma/ib_user_verbs.h
14032     +++ b/include/uapi/rdma/ib_user_verbs.h
14033     @@ -763,10 +763,28 @@ struct ib_uverbs_sge {
14034     __u32 lkey;
14035     };
14036    
14037     +enum ib_uverbs_wr_opcode {
14038     + IB_UVERBS_WR_RDMA_WRITE = 0,
14039     + IB_UVERBS_WR_RDMA_WRITE_WITH_IMM = 1,
14040     + IB_UVERBS_WR_SEND = 2,
14041     + IB_UVERBS_WR_SEND_WITH_IMM = 3,
14042     + IB_UVERBS_WR_RDMA_READ = 4,
14043     + IB_UVERBS_WR_ATOMIC_CMP_AND_SWP = 5,
14044     + IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD = 6,
14045     + IB_UVERBS_WR_LOCAL_INV = 7,
14046     + IB_UVERBS_WR_BIND_MW = 8,
14047     + IB_UVERBS_WR_SEND_WITH_INV = 9,
14048     + IB_UVERBS_WR_TSO = 10,
14049     + IB_UVERBS_WR_RDMA_READ_WITH_INV = 11,
14050     + IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP = 12,
14051     + IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13,
14052     + /* Review enum ib_wr_opcode before modifying this */
14053     +};
14054     +
14055     struct ib_uverbs_send_wr {
14056     __aligned_u64 wr_id;
14057     __u32 num_sge;
14058     - __u32 opcode;
14059     + __u32 opcode; /* see enum ib_uverbs_wr_opcode */
14060     __u32 send_flags;
14061     union {
14062     __be32 imm_data;
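
With the uapi enum above and the matching kernel-side initializers in ib_verbs.h, the shared work-request opcodes now have identical numeric values on both sides of the ABI, so a userspace opcode can be used directly as an ib_wr_opcode. A hedged sketch of a compile-time guard against the two enums drifting apart (illustrative only; the patch itself relies on the explicit initializers and the comment in the uapi header):

/* Illustrative sketch only -- not part of the patch. */
#include <linux/build_bug.h>
#include <rdma/ib_verbs.h>

static inline void demo_check_wr_opcode_abi(void)
{
        /* Fails the build if a kernel opcode ever diverges from its uapi value. */
        BUILD_BUG_ON(IB_WR_RDMA_WRITE != IB_UVERBS_WR_RDMA_WRITE);
        BUILD_BUG_ON(IB_WR_SEND != IB_UVERBS_WR_SEND);
        BUILD_BUG_ON(IB_WR_RDMA_READ != IB_UVERBS_WR_RDMA_READ);
        BUILD_BUG_ON(IB_WR_LSO != IB_UVERBS_WR_TSO);
}
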
14063     diff --git a/kernel/bounds.c b/kernel/bounds.c
14064     index c373e887c066..9795d75b09b2 100644
14065     --- a/kernel/bounds.c
14066     +++ b/kernel/bounds.c
14067     @@ -13,7 +13,7 @@
14068     #include <linux/log2.h>
14069     #include <linux/spinlock_types.h>
14070    
14071     -void foo(void)
14072     +int main(void)
14073     {
14074     /* The enum constants to put into include/generated/bounds.h */
14075     DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
14076     @@ -23,4 +23,6 @@ void foo(void)
14077     #endif
14078     DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
14079     /* End of constants */
14080     +
14081     + return 0;
14082     }
14083     diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
14084     index 8339d81cba1d..675eb6d36e47 100644
14085     --- a/kernel/bpf/syscall.c
14086     +++ b/kernel/bpf/syscall.c
14087     @@ -741,6 +741,17 @@ err_put:
14088     return err;
14089     }
14090    
14091     +static void maybe_wait_bpf_programs(struct bpf_map *map)
14092     +{
14093     + /* Wait for any running BPF programs to complete so that
14094     + * userspace, when we return to it, knows that all programs
14095     + * that could be running use the new map value.
14096     + */
14097     + if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
14098     + map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
14099     + synchronize_rcu();
14100     +}
14101     +
14102     #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
14103    
14104     static int map_update_elem(union bpf_attr *attr)
14105     @@ -831,6 +842,7 @@ static int map_update_elem(union bpf_attr *attr)
14106     }
14107     __this_cpu_dec(bpf_prog_active);
14108     preempt_enable();
14109     + maybe_wait_bpf_programs(map);
14110     out:
14111     free_value:
14112     kfree(value);
14113     @@ -883,6 +895,7 @@ static int map_delete_elem(union bpf_attr *attr)
14114     rcu_read_unlock();
14115     __this_cpu_dec(bpf_prog_active);
14116     preempt_enable();
14117     + maybe_wait_bpf_programs(map);
14118     out:
14119     kfree(key);
14120     err_put:
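
maybe_wait_bpf_programs() above applies the usual RCU rule to map-in-map updates: once the new inner map has been published, synchronize_rcu() blocks until every BPF program that might still hold the old inner map pointer has finished, so the syscall only returns when the update is globally visible. A generic sketch of that publish-then-wait pattern with hypothetical names (updates assumed to be serialized by the caller):

/* Illustrative sketch only -- the generic RCU replace pattern, hypothetical names. */
#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_item {
        int val;
};

static struct demo_item __rcu *demo_ptr;

static int demo_replace(int new_val)
{
        struct demo_item *new_item, *old_item;

        new_item = kmalloc(sizeof(*new_item), GFP_KERNEL);
        if (!new_item)
                return -ENOMEM;
        new_item->val = new_val;

        old_item = rcu_dereference_protected(demo_ptr, 1); /* updates serialized */
        rcu_assign_pointer(demo_ptr, new_item); /* publish the new item */

        synchronize_rcu();      /* wait for all readers still using old_item */
        kfree(old_item);        /* now nothing can reference it */
        return 0;
}
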
14121     diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
14122     index 465952a8e465..5780876ac81a 100644
14123     --- a/kernel/bpf/verifier.c
14124     +++ b/kernel/bpf/verifier.c
14125     @@ -553,7 +553,9 @@ static void __mark_reg_not_init(struct bpf_reg_state *reg);
14126     */
14127     static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
14128     {
14129     - reg->id = 0;
14130     + /* Clear id, off, and union(map_ptr, range) */
14131     + memset(((u8 *)reg) + sizeof(reg->type), 0,
14132     + offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
14133     reg->var_off = tnum_const(imm);
14134     reg->smin_value = (s64)imm;
14135     reg->smax_value = (s64)imm;
14136     @@ -572,7 +574,6 @@ static void __mark_reg_known_zero(struct bpf_reg_state *reg)
14137     static void __mark_reg_const_zero(struct bpf_reg_state *reg)
14138     {
14139     __mark_reg_known(reg, 0);
14140     - reg->off = 0;
14141     reg->type = SCALAR_VALUE;
14142     }
14143    
14144     @@ -683,9 +684,12 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg)
14145     /* Mark a register as having a completely unknown (scalar) value. */
14146     static void __mark_reg_unknown(struct bpf_reg_state *reg)
14147     {
14148     + /*
14149     + * Clear type, id, off, and union(map_ptr, range) and
14150     + * padding between 'type' and union
14151     + */
14152     + memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
14153     reg->type = SCALAR_VALUE;
14154     - reg->id = 0;
14155     - reg->off = 0;
14156     reg->var_off = tnum_unknown;
14157     reg->frameno = 0;
14158     __mark_reg_unbounded(reg);
14159     @@ -1727,9 +1731,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
14160     else
14161     mark_reg_known_zero(env, regs,
14162     value_regno);
14163     - regs[value_regno].id = 0;
14164     - regs[value_regno].off = 0;
14165     - regs[value_regno].range = 0;
14166     regs[value_regno].type = reg_type;
14167     }
14168    
14169     @@ -2580,7 +2581,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
14170     regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
14171     /* There is no offset yet applied, variable or fixed */
14172     mark_reg_known_zero(env, regs, BPF_REG_0);
14173     - regs[BPF_REG_0].off = 0;
14174     /* remember map_ptr, so that check_map_access()
14175     * can check 'value_size' boundary of memory access
14176     * to map element returned from bpf_map_lookup_elem()
14177     @@ -2762,7 +2762,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
14178     dst_reg->umax_value = umax_ptr;
14179     dst_reg->var_off = ptr_reg->var_off;
14180     dst_reg->off = ptr_reg->off + smin_val;
14181     - dst_reg->range = ptr_reg->range;
14182     + dst_reg->raw = ptr_reg->raw;
14183     break;
14184     }
14185     /* A new variable offset is created. Note that off_reg->off
14186     @@ -2792,10 +2792,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
14187     }
14188     dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
14189     dst_reg->off = ptr_reg->off;
14190     + dst_reg->raw = ptr_reg->raw;
14191     if (reg_is_pkt_pointer(ptr_reg)) {
14192     dst_reg->id = ++env->id_gen;
14193     /* something was added to pkt_ptr, set range to zero */
14194     - dst_reg->range = 0;
14195     + dst_reg->raw = 0;
14196     }
14197     break;
14198     case BPF_SUB:
14199     @@ -2824,7 +2825,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
14200     dst_reg->var_off = ptr_reg->var_off;
14201     dst_reg->id = ptr_reg->id;
14202     dst_reg->off = ptr_reg->off - smin_val;
14203     - dst_reg->range = ptr_reg->range;
14204     + dst_reg->raw = ptr_reg->raw;
14205     break;
14206     }
14207     /* A new variable offset is created. If the subtrahend is known
14208     @@ -2850,11 +2851,12 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
14209     }
14210     dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
14211     dst_reg->off = ptr_reg->off;
14212     + dst_reg->raw = ptr_reg->raw;
14213     if (reg_is_pkt_pointer(ptr_reg)) {
14214     dst_reg->id = ++env->id_gen;
14215     /* something was added to pkt_ptr, set range to zero */
14216     if (smin_val < 0)
14217     - dst_reg->range = 0;
14218     + dst_reg->raw = 0;
14219     }
14220     break;
14221     case BPF_AND:
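
The verifier hunks above replace several hand-written per-field resets with a single memset() over the leading part of struct bpf_reg_state, bounded by offsetof(), so the new 'raw' union member (and any future field placed in that region) is cleared automatically. A standalone sketch of the offsetof-bounded clear on a made-up struct (plain C, compiles on its own):

/* Illustrative sketch only -- offsetof-bounded partial clear on a made-up struct. */
#include <stddef.h>
#include <string.h>

struct demo_reg {
        int type;               /* preserved */
        int id;                 /* cleared */
        long off;               /* cleared */
        unsigned long raw;      /* cleared */
        long var_off;           /* preserved: everything from here on is kept */
};

void demo_clear_middle(struct demo_reg *reg)
{
        /* Clear everything after 'type' and before 'var_off' in one call. */
        memset((unsigned char *)reg + sizeof(reg->type), 0,
               offsetof(struct demo_reg, var_off) - sizeof(reg->type));
}

The same idiom keeps __mark_reg_known() and __mark_reg_unknown() in sync with the struct layout without naming each field individually.
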
14222     diff --git a/kernel/cpu.c b/kernel/cpu.c
14223     index 0097acec1c71..3adecda21444 100644
14224     --- a/kernel/cpu.c
14225     +++ b/kernel/cpu.c
14226     @@ -2026,6 +2026,12 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
14227     kobject_uevent(&dev->kobj, KOBJ_ONLINE);
14228     }
14229    
14230     +/*
14231     + * Architectures that need SMT-specific errata handling during SMT hotplug
14232     + * should override this.
14233     + */
14234     +void __weak arch_smt_update(void) { };
14235     +
14236     static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
14237     {
14238     int cpu, ret = 0;
14239     @@ -2052,8 +2058,10 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
14240     */
14241     cpuhp_offline_cpu_device(cpu);
14242     }
14243     - if (!ret)
14244     + if (!ret) {
14245     cpu_smt_control = ctrlval;
14246     + arch_smt_update();
14247     + }
14248     cpu_maps_update_done();
14249     return ret;
14250     }
14251     @@ -2064,6 +2072,7 @@ static int cpuhp_smt_enable(void)
14252    
14253     cpu_maps_update_begin();
14254     cpu_smt_control = CPU_SMT_ENABLED;
14255     + arch_smt_update();
14256     for_each_present_cpu(cpu) {
14257     /* Skip online CPUs and CPUs on offline nodes */
14258     if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
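
arch_smt_update() above is a weak, empty default: an architecture that must re-evaluate SMT-dependent mitigations when SMT is toggled (as x86 does for its speculation mitigations) supplies a non-weak definition that replaces it at link time. A minimal sketch of the weak-default/strong-override pattern with hypothetical names (the two definitions would live in separate source files):

/* Illustrative sketch only -- weak default plus arch override, hypothetical names. */
#include <linux/compiler.h>

/* kernel/demo.c: generic default that does nothing. */
void __weak demo_arch_hook(void)
{
}

/* arch/foo/kernel/demo.c: this non-weak definition wins at link time. */
void demo_arch_hook(void)
{
        /* arch-specific work, e.g. re-apply SMT-dependent settings */
}
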
14259     diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
14260     index 286d82329eb0..b2a87905846d 100644
14261     --- a/kernel/dma/contiguous.c
14262     +++ b/kernel/dma/contiguous.c
14263     @@ -49,7 +49,11 @@ static phys_addr_t limit_cmdline;
14264    
14265     static int __init early_cma(char *p)
14266     {
14267     - pr_debug("%s(%s)\n", __func__, p);
14268     + if (!p) {
14269     + pr_err("Config string not provided\n");
14270     + return -EINVAL;
14271     + }
14272     +
14273     size_cmdline = memparse(p, &p);
14274     if (*p != '@')
14275     return 0;
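
This hunk, like the later log_buf_len change in printk.c, guards an early parameter handler against a missing value: passing the option with no '=value' hands the handler a NULL string, and memparse() would otherwise dereference it. A hedged sketch of the same guard in a hypothetical early_param() handler:

/* Illustrative sketch only -- hypothetical early parameter with the same NULL guard. */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>

static unsigned long demo_size __initdata;

static int __init demo_size_setup(char *p)
{
        if (!p)         /* "demo_size" was given on the command line with no value */
                return -EINVAL;

        demo_size = memparse(p, &p);
        return 0;
}
early_param("demo_size", demo_size_setup);
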
14276     diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
14277     index fb86146037a7..9dbdccab3b6a 100644
14278     --- a/kernel/irq/manage.c
14279     +++ b/kernel/irq/manage.c
14280     @@ -927,6 +927,9 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
14281    
14282     local_bh_disable();
14283     ret = action->thread_fn(action->irq, action->dev_id);
14284     + if (ret == IRQ_HANDLED)
14285     + atomic_inc(&desc->threads_handled);
14286     +
14287     irq_finalize_oneshot(desc, action);
14288     local_bh_enable();
14289     return ret;
14290     @@ -943,6 +946,9 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
14291     irqreturn_t ret;
14292    
14293     ret = action->thread_fn(action->irq, action->dev_id);
14294     + if (ret == IRQ_HANDLED)
14295     + atomic_inc(&desc->threads_handled);
14296     +
14297     irq_finalize_oneshot(desc, action);
14298     return ret;
14299     }
14300     @@ -1020,8 +1026,6 @@ static int irq_thread(void *data)
14301     irq_thread_check_affinity(desc, action);
14302    
14303     action_ret = handler_fn(desc, action);
14304     - if (action_ret == IRQ_HANDLED)
14305     - atomic_inc(&desc->threads_handled);
14306     if (action_ret == IRQ_WAKE_THREAD)
14307     irq_wake_secondary(desc, action);
14308    
14309     diff --git a/kernel/kprobes.c b/kernel/kprobes.c
14310     index ab257be4d924..4344381664cc 100644
14311     --- a/kernel/kprobes.c
14312     +++ b/kernel/kprobes.c
14313     @@ -700,9 +700,10 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
14314     }
14315    
14316     /* Cancel unoptimizing for reusing */
14317     -static void reuse_unused_kprobe(struct kprobe *ap)
14318     +static int reuse_unused_kprobe(struct kprobe *ap)
14319     {
14320     struct optimized_kprobe *op;
14321     + int ret;
14322    
14323     BUG_ON(!kprobe_unused(ap));
14324     /*
14325     @@ -714,8 +715,12 @@ static void reuse_unused_kprobe(struct kprobe *ap)
14326     /* Enable the probe again */
14327     ap->flags &= ~KPROBE_FLAG_DISABLED;
14328     /* Optimize it again (remove from op->list) */
14329     - BUG_ON(!kprobe_optready(ap));
14330     + ret = kprobe_optready(ap);
14331     + if (ret)
14332     + return ret;
14333     +
14334     optimize_kprobe(ap);
14335     + return 0;
14336     }
14337    
14338     /* Remove optimized instructions */
14339     @@ -940,11 +945,16 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
14340     #define kprobe_disarmed(p) kprobe_disabled(p)
14341     #define wait_for_kprobe_optimizer() do {} while (0)
14342    
14343     -/* There should be no unused kprobes can be reused without optimization */
14344     -static void reuse_unused_kprobe(struct kprobe *ap)
14345     +static int reuse_unused_kprobe(struct kprobe *ap)
14346     {
14347     + /*
14348     + * If the optimized kprobe is NOT supported, the aggr kprobe is
14349     + * released at the same time that the last aggregated kprobe is
14350     + * unregistered.
14351     + * Thus there should be no chance to reuse unused kprobe.
14352     + */
14353     printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
14354     - BUG_ON(kprobe_unused(ap));
14355     + return -EINVAL;
14356     }
14357    
14358     static void free_aggr_kprobe(struct kprobe *p)
14359     @@ -1318,9 +1328,12 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
14360     goto out;
14361     }
14362     init_aggr_kprobe(ap, orig_p);
14363     - } else if (kprobe_unused(ap))
14364     + } else if (kprobe_unused(ap)) {
14365     /* This probe is going to die. Rescue it */
14366     - reuse_unused_kprobe(ap);
14367     + ret = reuse_unused_kprobe(ap);
14368     + if (ret)
14369     + goto out;
14370     + }
14371    
14372     if (kprobe_gone(ap)) {
14373     /*
14374     diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
14375     index dd13f865ad40..26b57e24476f 100644
14376     --- a/kernel/locking/lockdep.c
14377     +++ b/kernel/locking/lockdep.c
14378     @@ -4122,7 +4122,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
14379     {
14380     unsigned long flags;
14381    
14382     - if (unlikely(!lock_stat))
14383     + if (unlikely(!lock_stat || !debug_locks))
14384     return;
14385    
14386     if (unlikely(current->lockdep_recursion))
14387     @@ -4142,7 +4142,7 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
14388     {
14389     unsigned long flags;
14390    
14391     - if (unlikely(!lock_stat))
14392     + if (unlikely(!lock_stat || !debug_locks))
14393     return;
14394    
14395     if (unlikely(current->lockdep_recursion))
14396     diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
14397     index 9bf5404397e0..06045abd1887 100644
14398     --- a/kernel/printk/printk.c
14399     +++ b/kernel/printk/printk.c
14400     @@ -1048,7 +1048,12 @@ static void __init log_buf_len_update(unsigned size)
14401     /* save requested log_buf_len since it's too early to process it */
14402     static int __init log_buf_len_setup(char *str)
14403     {
14404     - unsigned size = memparse(str, &str);
14405     + unsigned int size;
14406     +
14407     + if (!str)
14408     + return -EINVAL;
14409     +
14410     + size = memparse(str, &str);
14411    
14412     log_buf_len_update(size);
14413    
14414     diff --git a/kernel/signal.c b/kernel/signal.c
14415     index 5843c541fda9..edc28afc9fb4 100644
14416     --- a/kernel/signal.c
14417     +++ b/kernel/signal.c
14418     @@ -1035,7 +1035,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
14419    
14420     result = TRACE_SIGNAL_IGNORED;
14421     if (!prepare_signal(sig, t,
14422     - from_ancestor_ns || (info == SEND_SIG_FORCED)))
14423     + from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
14424     goto ret;
14425    
14426     pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
14427     @@ -2847,7 +2847,7 @@ COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
14428     }
14429     #endif
14430    
14431     -enum siginfo_layout siginfo_layout(int sig, int si_code)
14432     +enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
14433     {
14434     enum siginfo_layout layout = SIL_KILL;
14435     if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
14436     @@ -3460,7 +3460,8 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
14437     }
14438    
14439     static int
14440     -do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp)
14441     +do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
14442     + size_t min_ss_size)
14443     {
14444     struct task_struct *t = current;
14445    
14446     @@ -3490,7 +3491,7 @@ do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp)
14447     ss_size = 0;
14448     ss_sp = NULL;
14449     } else {
14450     - if (unlikely(ss_size < MINSIGSTKSZ))
14451     + if (unlikely(ss_size < min_ss_size))
14452     return -ENOMEM;
14453     }
14454    
14455     @@ -3508,7 +3509,8 @@ SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
14456     if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
14457     return -EFAULT;
14458     err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
14459     - current_user_stack_pointer());
14460     + current_user_stack_pointer(),
14461     + MINSIGSTKSZ);
14462     if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
14463     err = -EFAULT;
14464     return err;
14465     @@ -3519,7 +3521,8 @@ int restore_altstack(const stack_t __user *uss)
14466     stack_t new;
14467     if (copy_from_user(&new, uss, sizeof(stack_t)))
14468     return -EFAULT;
14469     - (void)do_sigaltstack(&new, NULL, current_user_stack_pointer());
14470     + (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
14471     + MINSIGSTKSZ);
14472     /* squash all but EFAULT for now */
14473     return 0;
14474     }
14475     @@ -3553,7 +3556,8 @@ static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
14476     uss.ss_size = uss32.ss_size;
14477     }
14478     ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
14479     - compat_user_stack_pointer());
14480     + compat_user_stack_pointer(),
14481     + COMPAT_MINSIGSTKSZ);
14482     if (ret >= 0 && uoss_ptr) {
14483     compat_stack_t old;
14484     memset(&old, 0, sizeof(old));
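
do_sigaltstack() now takes the minimum stack size as an argument, so the native path keeps enforcing MINSIGSTKSZ while the compat path enforces the smaller COMPAT_MINSIGSTKSZ added to compat.h earlier in this patch. The userspace contract is unchanged: an alternate stack below the applicable minimum is rejected with ENOMEM. A small, self-contained userspace example of installing an alternate signal stack:

/* Illustrative userspace example -- not kernel code from this patch. */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        stack_t ss;

        ss.ss_sp = malloc(SIGSTKSZ);
        if (!ss.ss_sp) {
                perror("malloc");
                return 1;
        }
        ss.ss_size = SIGSTKSZ;  /* anything below MINSIGSTKSZ fails with ENOMEM */
        ss.ss_flags = 0;

        if (sigaltstack(&ss, NULL) == -1) {
                perror("sigaltstack");
                return 1;
        }
        printf("alternate signal stack of %zu bytes installed\n", (size_t)SIGSTKSZ);
        return 0;
}
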
14485     diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
14486     index d239004aaf29..eb908ef2ecec 100644
14487     --- a/kernel/trace/trace_events_hist.c
14488     +++ b/kernel/trace/trace_events_hist.c
14489     @@ -1063,8 +1063,10 @@ static int create_synth_event(int argc, char **argv)
14490     event = NULL;
14491     ret = -EEXIST;
14492     goto out;
14493     - } else if (delete_event)
14494     + } else if (delete_event) {
14495     + ret = -ENOENT;
14496     goto out;
14497     + }
14498    
14499     if (argc < 2) {
14500     ret = -EINVAL;
14501     diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
14502     index e5222b5fb4fe..923414a246e9 100644
14503     --- a/kernel/user_namespace.c
14504     +++ b/kernel/user_namespace.c
14505     @@ -974,10 +974,6 @@ static ssize_t map_write(struct file *file, const char __user *buf,
14506     if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
14507     goto out;
14508    
14509     - ret = sort_idmaps(&new_map);
14510     - if (ret < 0)
14511     - goto out;
14512     -
14513     ret = -EPERM;
14514     /* Map the lower ids from the parent user namespace to the
14515     * kernel global id space.
14516     @@ -1004,6 +1000,14 @@ static ssize_t map_write(struct file *file, const char __user *buf,
14517     e->lower_first = lower_first;
14518     }
14519    
14520     + /*
14521     + * If we want to use binary search for lookup, this clones the extent
14522     + * array and sorts both copies.
14523     + */
14524     + ret = sort_idmaps(&new_map);
14525     + if (ret < 0)
14526     + goto out;
14527     +
14528     /* Install the map */
14529     if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
14530     memcpy(map->extent, new_map.extent,
14531     diff --git a/lib/debug_locks.c b/lib/debug_locks.c
14532     index 96c4c633d95e..124fdf238b3d 100644
14533     --- a/lib/debug_locks.c
14534     +++ b/lib/debug_locks.c
14535     @@ -37,7 +37,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
14536     */
14537     int debug_locks_off(void)
14538     {
14539     - if (__debug_locks_off()) {
14540     + if (debug_locks && __debug_locks_off()) {
14541     if (!debug_locks_silent) {
14542     console_verbose();
14543     return 1;
14544     diff --git a/mm/hmm.c b/mm/hmm.c
14545     index c968e49f7a0c..90193a7fabce 100644
14546     --- a/mm/hmm.c
14547     +++ b/mm/hmm.c
14548     @@ -91,16 +91,6 @@ static struct hmm *hmm_register(struct mm_struct *mm)
14549     spin_lock_init(&hmm->lock);
14550     hmm->mm = mm;
14551    
14552     - /*
14553     - * We should only get here if hold the mmap_sem in write mode ie on
14554     - * registration of first mirror through hmm_mirror_register()
14555     - */
14556     - hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
14557     - if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
14558     - kfree(hmm);
14559     - return NULL;
14560     - }
14561     -
14562     spin_lock(&mm->page_table_lock);
14563     if (!mm->hmm)
14564     mm->hmm = hmm;
14565     @@ -108,12 +98,27 @@ static struct hmm *hmm_register(struct mm_struct *mm)
14566     cleanup = true;
14567     spin_unlock(&mm->page_table_lock);
14568    
14569     - if (cleanup) {
14570     - mmu_notifier_unregister(&hmm->mmu_notifier, mm);
14571     - kfree(hmm);
14572     - }
14573     + if (cleanup)
14574     + goto error;
14575     +
14576     + /*
14577     + * We should only get here if we hold the mmap_sem in write mode, i.e. on
14578     + * registration of the first mirror through hmm_mirror_register()
14579     + */
14580     + hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
14581     + if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
14582     + goto error_mm;
14583    
14584     return mm->hmm;
14585     +
14586     +error_mm:
14587     + spin_lock(&mm->page_table_lock);
14588     + if (mm->hmm == hmm)
14589     + mm->hmm = NULL;
14590     + spin_unlock(&mm->page_table_lock);
14591     +error:
14592     + kfree(hmm);
14593     + return NULL;
14594     }
14595    
14596     void hmm_mm_destroy(struct mm_struct *mm)
14597     @@ -278,12 +283,13 @@ void hmm_mirror_unregister(struct hmm_mirror *mirror)
14598     if (!should_unregister || mm == NULL)
14599     return;
14600    
14601     + mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);
14602     +
14603     spin_lock(&mm->page_table_lock);
14604     if (mm->hmm == hmm)
14605     mm->hmm = NULL;
14606     spin_unlock(&mm->page_table_lock);
14607    
14608     - mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);
14609     kfree(hmm);
14610     }
14611     EXPORT_SYMBOL(hmm_mirror_unregister);
14612     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
14613     index 5c390f5a5207..7b5c0ad9a6bd 100644
14614     --- a/mm/hugetlb.c
14615     +++ b/mm/hugetlb.c
14616     @@ -3690,6 +3690,12 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
14617     return err;
14618     ClearPagePrivate(page);
14619    
14620     + /*
14621     + * set page dirty so that it will not be removed from cache/file
14622     + * by non-hugetlbfs specific code paths.
14623     + */
14624     + set_page_dirty(page);
14625     +
14626     spin_lock(&inode->i_lock);
14627     inode->i_blocks += blocks_per_huge_page(h);
14628     spin_unlock(&inode->i_lock);
14629     diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
14630     index ae3c2a35d61b..11df03e71288 100644
14631     --- a/mm/page_vma_mapped.c
14632     +++ b/mm/page_vma_mapped.c
14633     @@ -21,7 +21,29 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
14634     if (!is_swap_pte(*pvmw->pte))
14635     return false;
14636     } else {
14637     - if (!pte_present(*pvmw->pte))
14638     + /*
14639     + * We get here when we are trying to unmap a private
14640     + * device page from the process address space. Such
14641     + * page is not CPU accessible and thus is mapped as
14642     + * a page is not CPU accessible and thus is mapped as
14643     + * a special swap entry; nonetheless it still counts
14644     + * as a valid regular mapping for the page (and is
14645     + * accounted as such in the page map count).
14646     + *
14647     + * So handle this special case as if it were a normal
14648     + * page mapping, i.e. lock the CPU page table and return
14649     + * true.
14650     + * For more details on device private memory see HMM
14651     + * (include/linux/hmm.h or mm/hmm.c).
14652     + */
14653     + if (is_swap_pte(*pvmw->pte)) {
14654     + swp_entry_t entry;
14655     +
14656     + /* Handle un-addressable ZONE_DEVICE memory */
14657     + entry = pte_to_swp_entry(*pvmw->pte);
14658     + if (!is_device_private_entry(entry))
14659     + return false;
14660     + } else if (!pte_present(*pvmw->pte))
14661     return false;
14662     }
14663     }
14664     diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
14665     index 5e4f04004a49..7bf833598615 100644
14666     --- a/net/core/netclassid_cgroup.c
14667     +++ b/net/core/netclassid_cgroup.c
14668     @@ -106,6 +106,7 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
14669     iterate_fd(p->files, 0, update_classid_sock,
14670     (void *)(unsigned long)cs->classid);
14671     task_unlock(p);
14672     + cond_resched();
14673     }
14674     css_task_iter_end(&it);
14675    
14676     diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
14677     index 82178cc69c96..777fa3b7fb13 100644
14678     --- a/net/ipv4/cipso_ipv4.c
14679     +++ b/net/ipv4/cipso_ipv4.c
14680     @@ -1512,7 +1512,7 @@ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
14681     *
14682     * Description:
14683     * Parse the packet's IP header looking for a CIPSO option. Returns a pointer
14684     - * to the start of the CIPSO option on success, NULL if one if not found.
14685     + * to the start of the CIPSO option on success, NULL if one is not found.
14686     *
14687     */
14688     unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
14689     @@ -1522,10 +1522,8 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
14690     int optlen;
14691     int taglen;
14692    
14693     - for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
14694     + for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 1; ) {
14695     switch (optptr[0]) {
14696     - case IPOPT_CIPSO:
14697     - return optptr;
14698     case IPOPT_END:
14699     return NULL;
14700     case IPOPT_NOOP:
14701     @@ -1534,6 +1532,11 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
14702     default:
14703     taglen = optptr[1];
14704     }
14705     + if (!taglen || taglen > optlen)
14706     + return NULL;
14707     + if (optptr[0] == IPOPT_CIPSO)
14708     + return optptr;
14709     +
14710     optlen -= taglen;
14711     optptr += taglen;
14712     }
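
The cipso_v4_optptr() fix above validates each option's length before trusting it: a zero or oversized taglen used to let the walk run past the IP header, and the loop now also stops once fewer than two bytes remain, so a trailing byte cannot be misread as a tagged option. A standalone sketch of the same defensive walk over an IPv4-style options area (buffer, lengths, and the searched option type are hypothetical):

/* Illustrative sketch only -- defensive walk over IPv4-style options (type, len, data). */
#include <stddef.h>

#define DEMO_OPT_END    0x00    /* end of options list */
#define DEMO_OPT_NOOP   0x01    /* single-byte padding */

/* Return a pointer to the first option of type 'want', or NULL if not found. */
const unsigned char *demo_find_opt(const unsigned char *opt, size_t optlen,
                                   unsigned char want)
{
        while (optlen > 1) {            /* need at least a type and a length byte */
                size_t taglen;

                if (opt[0] == DEMO_OPT_END)
                        return NULL;
                if (opt[0] == DEMO_OPT_NOOP)
                        taglen = 1;
                else
                        taglen = opt[1];

                if (taglen == 0 || taglen > optlen)
                        return NULL;    /* malformed: would walk past the header */
                if (opt[0] == want)
                        return opt;

                opt += taglen;
                optlen -= taglen;
        }
        return NULL;
}
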
14713     diff --git a/net/netfilter/xt_nat.c b/net/netfilter/xt_nat.c
14714     index 8af9707f8789..ac91170fc8c8 100644
14715     --- a/net/netfilter/xt_nat.c
14716     +++ b/net/netfilter/xt_nat.c
14717     @@ -216,6 +216,8 @@ static struct xt_target xt_nat_target_reg[] __read_mostly = {
14718     {
14719     .name = "DNAT",
14720     .revision = 2,
14721     + .checkentry = xt_nat_checkentry,
14722     + .destroy = xt_nat_destroy,
14723     .target = xt_dnat_target_v2,
14724     .targetsize = sizeof(struct nf_nat_range2),
14725     .table = "nat",
14726     diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
14727     index 3dc0acf54245..be7cd140b2a3 100644
14728     --- a/net/sched/sch_api.c
14729     +++ b/net/sched/sch_api.c
14730     @@ -1309,7 +1309,6 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
14731    
14732     const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
14733     [TCA_KIND] = { .type = NLA_STRING },
14734     - [TCA_OPTIONS] = { .type = NLA_NESTED },
14735     [TCA_RATE] = { .type = NLA_BINARY,
14736     .len = sizeof(struct tc_estimator) },
14737     [TCA_STAB] = { .type = NLA_NESTED },
14738     diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
14739     index 5185efb9027b..83ccd0221c98 100644
14740     --- a/net/sunrpc/svc_xprt.c
14741     +++ b/net/sunrpc/svc_xprt.c
14742     @@ -989,7 +989,7 @@ static void call_xpt_users(struct svc_xprt *xprt)
14743     spin_lock(&xprt->xpt_lock);
14744     while (!list_empty(&xprt->xpt_users)) {
14745     u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
14746     - list_del(&u->list);
14747     + list_del_init(&u->list);
14748     u->callback(u);
14749     }
14750     spin_unlock(&xprt->xpt_lock);
14751     diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
14752     index a68180090554..b9827665ff35 100644
14753     --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
14754     +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
14755     @@ -248,6 +248,7 @@ static void
14756     xprt_rdma_bc_close(struct rpc_xprt *xprt)
14757     {
14758     dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
14759     + xprt->cwnd = RPC_CWNDSHIFT;
14760     }
14761    
14762     static void
14763     diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
14764     index 143ce2579ba9..98cbc7b060ba 100644
14765     --- a/net/sunrpc/xprtrdma/transport.c
14766     +++ b/net/sunrpc/xprtrdma/transport.c
14767     @@ -468,6 +468,12 @@ xprt_rdma_close(struct rpc_xprt *xprt)
14768     xprt->reestablish_timeout = 0;
14769     xprt_disconnect_done(xprt);
14770     rpcrdma_ep_disconnect(ep, ia);
14771     +
14772     + /* Prepare @xprt for the next connection by reinitializing
14773     + * its credit grant to one (see RFC 8166, Section 3.3.3).
14774     + */
14775     + r_xprt->rx_buf.rb_credits = 1;
14776     + xprt->cwnd = RPC_CWNDSHIFT;
14777     }
14778    
14779     /**
14780     diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
14781     index 7e7e7e7c250a..d9e7728027c6 100644
14782     --- a/security/integrity/ima/ima_crypto.c
14783     +++ b/security/integrity/ima/ima_crypto.c
14784     @@ -210,7 +210,7 @@ static int ima_calc_file_hash_atfm(struct file *file,
14785     {
14786     loff_t i_size, offset;
14787     char *rbuf[2] = { NULL, };
14788     - int rc, read = 0, rbuf_len, active = 0, ahash_rc = 0;
14789     + int rc, rbuf_len, active = 0, ahash_rc = 0;
14790     struct ahash_request *req;
14791     struct scatterlist sg[1];
14792     struct crypto_wait wait;
14793     @@ -257,11 +257,6 @@ static int ima_calc_file_hash_atfm(struct file *file,
14794     &rbuf_size[1], 0);
14795     }
14796    
14797     - if (!(file->f_mode & FMODE_READ)) {
14798     - file->f_mode |= FMODE_READ;
14799     - read = 1;
14800     - }
14801     -
14802     for (offset = 0; offset < i_size; offset += rbuf_len) {
14803     if (!rbuf[1] && offset) {
14804     /* Not using two buffers, and it is not the first
14805     @@ -300,8 +295,6 @@ static int ima_calc_file_hash_atfm(struct file *file,
14806     /* wait for the last update request to complete */
14807     rc = ahash_wait(ahash_rc, &wait);
14808     out3:
14809     - if (read)
14810     - file->f_mode &= ~FMODE_READ;
14811     ima_free_pages(rbuf[0], rbuf_size[0]);
14812     ima_free_pages(rbuf[1], rbuf_size[1]);
14813     out2:
14814     @@ -336,7 +329,7 @@ static int ima_calc_file_hash_tfm(struct file *file,
14815     {
14816     loff_t i_size, offset = 0;
14817     char *rbuf;
14818     - int rc, read = 0;
14819     + int rc;
14820     SHASH_DESC_ON_STACK(shash, tfm);
14821    
14822     shash->tfm = tfm;
14823     @@ -357,11 +350,6 @@ static int ima_calc_file_hash_tfm(struct file *file,
14824     if (!rbuf)
14825     return -ENOMEM;
14826    
14827     - if (!(file->f_mode & FMODE_READ)) {
14828     - file->f_mode |= FMODE_READ;
14829     - read = 1;
14830     - }
14831     -
14832     while (offset < i_size) {
14833     int rbuf_len;
14834    
14835     @@ -378,8 +366,6 @@ static int ima_calc_file_hash_tfm(struct file *file,
14836     if (rc)
14837     break;
14838     }
14839     - if (read)
14840     - file->f_mode &= ~FMODE_READ;
14841     kfree(rbuf);
14842     out:
14843     if (!rc)
14844     @@ -420,6 +406,8 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
14845     {
14846     loff_t i_size;
14847     int rc;
14848     + struct file *f = file;
14849     + bool new_file_instance = false, modified_flags = false;
14850    
14851     /*
14852     * For consistency, fail file's opened with the O_DIRECT flag on
14853     @@ -431,15 +419,41 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
14854     return -EINVAL;
14855     }
14856    
14857     - i_size = i_size_read(file_inode(file));
14858     + /* Open a new file instance in O_RDONLY if we cannot read */
14859     + if (!(file->f_mode & FMODE_READ)) {
14860     + int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
14861     + O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
14862     + flags |= O_RDONLY;
14863     + f = dentry_open(&file->f_path, flags, file->f_cred);
14864     + if (IS_ERR(f)) {
14865     + /*
14866     + * Cannot open the file again, so modify the f_flags
14867     + * of the original and continue
14868     + */
14869     + pr_info_ratelimited("Unable to reopen file for reading.\n");
14870     + f = file;
14871     + f->f_flags |= FMODE_READ;
14872     + modified_flags = true;
14873     + } else {
14874     + new_file_instance = true;
14875     + }
14876     + }
14877     +
14878     + i_size = i_size_read(file_inode(f));
14879    
14880     if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
14881     - rc = ima_calc_file_ahash(file, hash);
14882     + rc = ima_calc_file_ahash(f, hash);
14883     if (!rc)
14884     - return 0;
14885     + goto out;
14886     }
14887    
14888     - return ima_calc_file_shash(file, hash);
14889     + rc = ima_calc_file_shash(f, hash);
14890     +out:
14891     + if (new_file_instance)
14892     + fput(f);
14893     + else if (modified_flags)
14894     + f->f_flags &= ~FMODE_READ;
14895     + return rc;
14896     }
14897    
14898     /*
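
The ima_calc_file_hash() change above stops toggling FMODE_READ on a struct file the caller owns; instead it opens a second, read-only struct file on the same path with the same credentials via dentry_open(), and only falls back to flag twiddling if that reopen fails. A hedged sketch of the reopen step in isolation (hypothetical helper around a kernel-internal API):

/* Illustrative sketch only -- hypothetical helper around the reopen pattern. */
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>

static struct file *demo_reopen_readonly(struct file *file)
{
        int flags = file->f_flags & ~(O_WRONLY | O_APPEND | O_TRUNC |
                                      O_CREAT | O_NOCTTY | O_EXCL);

        /* Same path and credentials, but an independent read-only open. */
        return dentry_open(&file->f_path, flags | O_RDONLY, file->f_cred);
}

/*
 * Caller sketch:
 *      f = demo_reopen_readonly(file);
 *      if (IS_ERR(f))
 *              f = file;               // fall back to the original struct file
 *      ...read and hash f...
 *      if (f != file)
 *              fput(f);
 */
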
14899     diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
14900     index ae9d5c766a3c..cfb8cc3b975e 100644
14901     --- a/security/integrity/ima/ima_fs.c
14902     +++ b/security/integrity/ima/ima_fs.c
14903     @@ -42,14 +42,14 @@ static int __init default_canonical_fmt_setup(char *str)
14904     __setup("ima_canonical_fmt", default_canonical_fmt_setup);
14905    
14906     static int valid_policy = 1;
14907     -#define TMPBUFLEN 12
14908     +
14909     static ssize_t ima_show_htable_value(char __user *buf, size_t count,
14910     loff_t *ppos, atomic_long_t *val)
14911     {
14912     - char tmpbuf[TMPBUFLEN];
14913     + char tmpbuf[32]; /* greater than largest 'long' string value */
14914     ssize_t len;
14915    
14916     - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
14917     + len = scnprintf(tmpbuf, sizeof(tmpbuf), "%li\n", atomic_long_read(val));
14918     return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
14919     }
14920    
14921     diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
14922     index ad9a9b8e9979..18b98b5e1e3c 100644
14923     --- a/security/selinux/hooks.c
14924     +++ b/security/selinux/hooks.c
14925     @@ -1508,6 +1508,11 @@ static int selinux_genfs_get_sid(struct dentry *dentry,
14926     }
14927     rc = security_genfs_sid(&selinux_state, sb->s_type->name,
14928     path, tclass, sid);
14929     + if (rc == -ENOENT) {
14930     + /* No match in policy, mark as unlabeled. */
14931     + *sid = SECINITSID_UNLABELED;
14932     + rc = 0;
14933     + }
14934     }
14935     free_page((unsigned long)buffer);
14936     return rc;
14937     diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
14938     index 340fc30ad85d..70d3066e69fe 100644
14939     --- a/security/smack/smack_lsm.c
14940     +++ b/security/smack/smack_lsm.c
14941     @@ -421,6 +421,7 @@ static int smk_ptrace_rule_check(struct task_struct *tracer,
14942     struct smk_audit_info ad, *saip = NULL;
14943     struct task_smack *tsp;
14944     struct smack_known *tracer_known;
14945     + const struct cred *tracercred;
14946    
14947     if ((mode & PTRACE_MODE_NOAUDIT) == 0) {
14948     smk_ad_init(&ad, func, LSM_AUDIT_DATA_TASK);
14949     @@ -429,7 +430,8 @@ static int smk_ptrace_rule_check(struct task_struct *tracer,
14950     }
14951    
14952     rcu_read_lock();
14953     - tsp = __task_cred(tracer)->security;
14954     + tracercred = __task_cred(tracer);
14955     + tsp = tracercred->security;
14956     tracer_known = smk_of_task(tsp);
14957    
14958     if ((mode & PTRACE_MODE_ATTACH) &&
14959     @@ -439,7 +441,7 @@ static int smk_ptrace_rule_check(struct task_struct *tracer,
14960     rc = 0;
14961     else if (smack_ptrace_rule == SMACK_PTRACE_DRACONIAN)
14962     rc = -EACCES;
14963     - else if (capable(CAP_SYS_PTRACE))
14964     + else if (smack_privileged_cred(CAP_SYS_PTRACE, tracercred))
14965     rc = 0;
14966     else
14967     rc = -EACCES;
14968     @@ -1841,6 +1843,7 @@ static int smack_file_send_sigiotask(struct task_struct *tsk,
14969     {
14970     struct smack_known *skp;
14971     struct smack_known *tkp = smk_of_task(tsk->cred->security);
14972     + const struct cred *tcred;
14973     struct file *file;
14974     int rc;
14975     struct smk_audit_info ad;
14976     @@ -1854,8 +1857,12 @@ static int smack_file_send_sigiotask(struct task_struct *tsk,
14977     skp = file->f_security;
14978     rc = smk_access(skp, tkp, MAY_DELIVER, NULL);
14979     rc = smk_bu_note("sigiotask", skp, tkp, MAY_DELIVER, rc);
14980     - if (rc != 0 && has_capability(tsk, CAP_MAC_OVERRIDE))
14981     +
14982     + rcu_read_lock();
14983     + tcred = __task_cred(tsk);
14984     + if (rc != 0 && smack_privileged_cred(CAP_MAC_OVERRIDE, tcred))
14985     rc = 0;
14986     + rcu_read_unlock();
14987    
14988     smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
14989     smk_ad_setfield_u_tsk(&ad, tsk);
14990     diff --git a/sound/pci/ca0106/ca0106.h b/sound/pci/ca0106/ca0106.h
14991     index 04402c14cb23..9847b669cf3c 100644
14992     --- a/sound/pci/ca0106/ca0106.h
14993     +++ b/sound/pci/ca0106/ca0106.h
14994     @@ -582,7 +582,7 @@
14995     #define SPI_PL_BIT_R_R (2<<7) /* right channel = right */
14996     #define SPI_PL_BIT_R_C (3<<7) /* right channel = (L+R)/2 */
14997     #define SPI_IZD_REG 2
14998     -#define SPI_IZD_BIT (1<<4) /* infinite zero detect */
14999     +#define SPI_IZD_BIT (0<<4) /* infinite zero detect */
15000    
15001     #define SPI_FMT_REG 3
15002     #define SPI_FMT_BIT_RJ (0<<0) /* right justified mode */
15003     diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
15004     index a68e75b00ea3..53c3cd28bc99 100644
15005     --- a/sound/pci/hda/hda_controller.h
15006     +++ b/sound/pci/hda/hda_controller.h
15007     @@ -160,6 +160,7 @@ struct azx {
15008     unsigned int msi:1;
15009     unsigned int probing:1; /* codec probing phase */
15010     unsigned int snoop:1;
15011     + unsigned int uc_buffer:1; /* non-cached pages for stream buffers */
15012     unsigned int align_buffer_size:1;
15013     unsigned int region_requested:1;
15014     unsigned int disabled:1; /* disabled by vga_switcheroo */
15015     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
15016     index aa4c672dbaf7..625cb6c7b7d6 100644
15017     --- a/sound/pci/hda/hda_intel.c
15018     +++ b/sound/pci/hda/hda_intel.c
15019     @@ -412,7 +412,7 @@ static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool
15020     #ifdef CONFIG_SND_DMA_SGBUF
15021     if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) {
15022     struct snd_sg_buf *sgbuf = dmab->private_data;
15023     - if (chip->driver_type == AZX_DRIVER_CMEDIA)
15024     + if (!chip->uc_buffer)
15025     return; /* deal with only CORB/RIRB buffers */
15026     if (on)
15027     set_pages_array_wc(sgbuf->page_table, sgbuf->pages);
15028     @@ -1678,6 +1678,7 @@ static void azx_check_snoop_available(struct azx *chip)
15029     dev_info(chip->card->dev, "Force to %s mode by module option\n",
15030     snoop ? "snoop" : "non-snoop");
15031     chip->snoop = snoop;
15032     + chip->uc_buffer = !snoop;
15033     return;
15034     }
15035    
15036     @@ -1698,8 +1699,12 @@ static void azx_check_snoop_available(struct azx *chip)
15037     snoop = false;
15038    
15039     chip->snoop = snoop;
15040     - if (!snoop)
15041     + if (!snoop) {
15042     dev_info(chip->card->dev, "Force to non-snoop mode\n");
15043     + /* C-Media requires non-cached pages only for CORB/RIRB */
15044     + if (chip->driver_type != AZX_DRIVER_CMEDIA)
15045     + chip->uc_buffer = true;
15046     + }
15047     }
15048    
15049     static void azx_probe_work(struct work_struct *work)
15050     @@ -2138,7 +2143,7 @@ static void pcm_mmap_prepare(struct snd_pcm_substream *substream,
15051     #ifdef CONFIG_X86
15052     struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
15053     struct azx *chip = apcm->chip;
15054     - if (!azx_snoop(chip) && chip->driver_type != AZX_DRIVER_CMEDIA)
15055     + if (chip->uc_buffer)
15056     area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
15057     #endif
15058     }
15059     @@ -2257,8 +2262,12 @@ static struct snd_pci_quirk power_save_blacklist[] = {
15060     /* https://bugzilla.redhat.com/show_bug.cgi?id=1581607 */
15061     SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", 0),
15062     /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
15063     + SND_PCI_QUIRK(0x1028, 0x0497, "Dell Precision T3600", 0),
15064     + /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
15065     /* Note the P55A-UD3 and Z87-D3HP share the subsys id for the HDA dev */
15066     SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte P55A-UD3 / Z87-D3HP", 0),
15067     + /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
15068     + SND_PCI_QUIRK(0x8086, 0x2040, "Intel DZ77BH-55K", 0),
15069     /* https://bugzilla.kernel.org/show_bug.cgi?id=199607 */
15070     SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0),
15071     /* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */
15072     diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
15073     index cfd4e4f97f8f..3c5f2a603754 100644
15074     --- a/sound/pci/hda/patch_conexant.c
15075     +++ b/sound/pci/hda/patch_conexant.c
15076     @@ -943,6 +943,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
15077     SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410),
15078     SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410),
15079     SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD),
15080     + SND_PCI_QUIRK(0x17aa, 0x3905, "Lenovo G50-30", CXT_FIXUP_STEREO_DMIC),
15081     SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
15082     SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
15083     SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
15084     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
15085     index 3ac7ba9b342d..1070749c3cf4 100644
15086     --- a/sound/pci/hda/patch_realtek.c
15087     +++ b/sound/pci/hda/patch_realtek.c
15088     @@ -6841,6 +6841,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
15089     {0x1a, 0x02a11040},
15090     {0x1b, 0x01014020},
15091     {0x21, 0x0221101f}),
15092     + SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
15093     + {0x14, 0x90170110},
15094     + {0x19, 0x02a11030},
15095     + {0x1a, 0x02a11040},
15096     + {0x1b, 0x01011020},
15097     + {0x21, 0x0221101f}),
15098     SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
15099     {0x14, 0x90170110},
15100     {0x19, 0x02a11020},
15101     @@ -7738,6 +7744,8 @@ enum {
15102     ALC662_FIXUP_ASUS_Nx50,
15103     ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE,
15104     ALC668_FIXUP_ASUS_Nx51,
15105     + ALC668_FIXUP_MIC_COEF,
15106     + ALC668_FIXUP_ASUS_G751,
15107     ALC891_FIXUP_HEADSET_MODE,
15108     ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
15109     ALC662_FIXUP_ACER_VERITON,
15110     @@ -8007,6 +8015,23 @@ static const struct hda_fixup alc662_fixups[] = {
15111     .chained = true,
15112     .chain_id = ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE,
15113     },
15114     + [ALC668_FIXUP_MIC_COEF] = {
15115     + .type = HDA_FIXUP_VERBS,
15116     + .v.verbs = (const struct hda_verb[]) {
15117     + { 0x20, AC_VERB_SET_COEF_INDEX, 0xc3 },
15118     + { 0x20, AC_VERB_SET_PROC_COEF, 0x4000 },
15119     + {}
15120     + },
15121     + },
15122     + [ALC668_FIXUP_ASUS_G751] = {
15123     + .type = HDA_FIXUP_PINS,
15124     + .v.pins = (const struct hda_pintbl[]) {
15125     + { 0x16, 0x0421101f }, /* HP */
15126     + {}
15127     + },
15128     + .chained = true,
15129     + .chain_id = ALC668_FIXUP_MIC_COEF
15130     + },
15131     [ALC891_FIXUP_HEADSET_MODE] = {
15132     .type = HDA_FIXUP_FUNC,
15133     .v.func = alc_fixup_headset_mode,
15134     @@ -8080,6 +8105,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
15135     SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
15136     SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
15137     SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
15138     + SND_PCI_QUIRK(0x1043, 0x12ff, "ASUS G751", ALC668_FIXUP_ASUS_G751),
15139     SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
15140     SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
15141     SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
15142     diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
15143     index 97f49b751e6e..568575b72f2f 100644
15144     --- a/sound/pci/hda/thinkpad_helper.c
15145     +++ b/sound/pci/hda/thinkpad_helper.c
15146     @@ -58,8 +58,8 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
15147     removefunc = false;
15148     }
15149     if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0 &&
15150     - snd_hda_gen_add_micmute_led(codec,
15151     - update_tpacpi_micmute) > 0)
15152     + !snd_hda_gen_add_micmute_led(codec,
15153     + update_tpacpi_micmute))
15154     removefunc = false;
15155     }
15156    
15157     diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
15158     index 77b265bd0505..3135e9eafd18 100644
15159     --- a/sound/soc/amd/acp-pcm-dma.c
15160     +++ b/sound/soc/amd/acp-pcm-dma.c
15161     @@ -1036,16 +1036,22 @@ static snd_pcm_uframes_t acp_dma_pointer(struct snd_pcm_substream *substream)
15162    
15163     if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
15164     period_bytes = frames_to_bytes(runtime, runtime->period_size);
15165     - dscr = acp_reg_read(rtd->acp_mmio, rtd->dma_curr_dscr);
15166     - if (dscr == rtd->dma_dscr_idx_1)
15167     - pos = period_bytes;
15168     - else
15169     - pos = 0;
15170     bytescount = acp_get_byte_count(rtd);
15171     - if (bytescount > rtd->bytescount)
15172     + if (bytescount >= rtd->bytescount)
15173     bytescount -= rtd->bytescount;
15174     - delay = do_div(bytescount, period_bytes);
15175     - runtime->delay = bytes_to_frames(runtime, delay);
15176     + if (bytescount < period_bytes) {
15177     + pos = 0;
15178     + } else {
15179     + dscr = acp_reg_read(rtd->acp_mmio, rtd->dma_curr_dscr);
15180     + if (dscr == rtd->dma_dscr_idx_1)
15181     + pos = period_bytes;
15182     + else
15183     + pos = 0;
15184     + }
15185     + if (bytescount > 0) {
15186     + delay = do_div(bytescount, period_bytes);
15187     + runtime->delay = bytes_to_frames(runtime, delay);
15188     + }
15189     } else {
15190     buffersize = frames_to_bytes(runtime, runtime->buffer_size);
15191     bytescount = acp_get_byte_count(rtd);
15192     diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
15193     index d5035f2f2b2b..ce508b4cc85c 100644
15194     --- a/sound/soc/codecs/sta32x.c
15195     +++ b/sound/soc/codecs/sta32x.c
15196     @@ -879,6 +879,9 @@ static int sta32x_probe(struct snd_soc_component *component)
15197     struct sta32x_priv *sta32x = snd_soc_component_get_drvdata(component);
15198     struct sta32x_platform_data *pdata = sta32x->pdata;
15199     int i, ret = 0, thermal = 0;
15200     +
15201     + sta32x->component = component;
15202     +
15203     ret = regulator_bulk_enable(ARRAY_SIZE(sta32x->supplies),
15204     sta32x->supplies);
15205     if (ret != 0) {
15206     diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
15207     index 2620d77729c5..f99c600f86e4 100644
15208     --- a/sound/soc/intel/skylake/skl-topology.c
15209     +++ b/sound/soc/intel/skylake/skl-topology.c
15210     @@ -2461,6 +2461,7 @@ static int skl_tplg_get_token(struct device *dev,
15211    
15212     case SKL_TKN_U8_CORE_ID:
15213     mconfig->core_id = tkn_elem->value;
15214     + break;
15215    
15216     case SKL_TKN_U8_MOD_TYPE:
15217     mconfig->m_type = tkn_elem->value;
15218     diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
15219     index 3013ac8f83d0..cab7b0aea6ea 100755
15220     --- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
15221     +++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
15222     @@ -48,7 +48,7 @@ trace_libc_inet_pton_backtrace() {
15223     *)
15224     eventattr='max-stack=3'
15225     echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
15226     - echo ".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
15227     + echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
15228     ;;
15229     esac
15230    
15231     diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
15232     index 3d1cf5bf7f18..9005fbe0780e 100644
15233     --- a/tools/perf/util/strbuf.c
15234     +++ b/tools/perf/util/strbuf.c
15235     @@ -98,19 +98,25 @@ static int strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap)
15236    
15237     va_copy(ap_saved, ap);
15238     len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap);
15239     - if (len < 0)
15240     + if (len < 0) {
15241     + va_end(ap_saved);
15242     return len;
15243     + }
15244     if (len > strbuf_avail(sb)) {
15245     ret = strbuf_grow(sb, len);
15246     - if (ret)
15247     + if (ret) {
15248     + va_end(ap_saved);
15249     return ret;
15250     + }
15251     len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved);
15252     va_end(ap_saved);
15253     if (len > strbuf_avail(sb)) {
15254     pr_debug("this should not happen, your vsnprintf is broken");
15255     + va_end(ap_saved);
15256     return -EINVAL;
15257     }
15258     }
15259     + va_end(ap_saved);
15260     return strbuf_setlen(sb, sb->len + len);
15261     }
15262    
15263     diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
15264     index 7b0ca7cbb7de..8ad8e755127b 100644
15265     --- a/tools/perf/util/trace-event-info.c
15266     +++ b/tools/perf/util/trace-event-info.c
15267     @@ -531,12 +531,14 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs,
15268     "/tmp/perf-XXXXXX");
15269     if (!mkstemp(tdata->temp_file)) {
15270     pr_debug("Can't make temp file");
15271     + free(tdata);
15272     return NULL;
15273     }
15274    
15275     temp_fd = open(tdata->temp_file, O_RDWR);
15276     if (temp_fd < 0) {
15277     pr_debug("Can't read '%s'", tdata->temp_file);
15278     + free(tdata);
15279     return NULL;
15280     }
15281    
15282     diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
15283     index e76214f8d596..b15a9bf1837b 100644
15284     --- a/tools/perf/util/trace-event-parse.c
15285     +++ b/tools/perf/util/trace-event-parse.c
15286     @@ -158,6 +158,7 @@ void parse_ftrace_printk(struct tep_handle *pevent,
15287     printk = strdup(fmt+1);
15288     line = strtok_r(NULL, "\n", &next);
15289     tep_register_print_string(pevent, printk, addr);
15290     + free(printk);
15291     }
15292     }
15293    
15294     diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
15295     index 3dfc1db6b25b..5eb1b2469bba 100644
15296     --- a/tools/perf/util/trace-event-read.c
15297     +++ b/tools/perf/util/trace-event-read.c
15298     @@ -349,9 +349,12 @@ static int read_event_files(struct tep_handle *pevent)
15299     for (x=0; x < count; x++) {
15300     size = read8(pevent);
15301     ret = read_event_file(pevent, sys, size);
15302     - if (ret)
15303     + if (ret) {
15304     + free(sys);
15305     return ret;
15306     + }
15307     }
15308     + free(sys);
15309     }
15310     return 0;
15311     }
15312     diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c
15313     index df43cd45d810..ccd08dd00996 100644
15314     --- a/tools/power/cpupower/utils/cpufreq-info.c
15315     +++ b/tools/power/cpupower/utils/cpufreq-info.c
15316     @@ -200,6 +200,8 @@ static int get_boost_mode(unsigned int cpu)
15317     printf(_(" Boost States: %d\n"), b_states);
15318     printf(_(" Total States: %d\n"), pstate_no);
15319     for (i = 0; i < pstate_no; i++) {
15320     + if (!pstates[i])
15321     + continue;
15322     if (i < b_states)
15323     printf(_(" Pstate-Pb%d: %luMHz (boost state)"
15324     "\n"), i, pstates[i]);
15325     diff --git a/tools/power/cpupower/utils/helpers/amd.c b/tools/power/cpupower/utils/helpers/amd.c
15326     index bb41cdd0df6b..9607ada5b29a 100644
15327     --- a/tools/power/cpupower/utils/helpers/amd.c
15328     +++ b/tools/power/cpupower/utils/helpers/amd.c
15329     @@ -33,7 +33,7 @@ union msr_pstate {
15330     unsigned vid:8;
15331     unsigned iddval:8;
15332     unsigned idddiv:2;
15333     - unsigned res1:30;
15334     + unsigned res1:31;
15335     unsigned en:1;
15336     } fam17h_bits;
15337     unsigned long long val;
15338     @@ -119,6 +119,11 @@ int decode_pstates(unsigned int cpu, unsigned int cpu_family,
15339     }
15340     if (read_msr(cpu, MSR_AMD_PSTATE + i, &pstate.val))
15341     return -1;
15342     + if ((cpu_family == 0x17) && (!pstate.fam17h_bits.en))
15343     + continue;
15344     + else if (!pstate.bits.en)
15345     + continue;
15346     +
15347     pstates[i] = get_cof(cpu_family, pstate);
15348     }
15349     *no = i;
15350     diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc
15351     index cef11377dcbd..c604438df13b 100644
15352     --- a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc
15353     +++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc
15354     @@ -35,18 +35,18 @@ fi
15355    
15356     reset_trigger
15357    
15358     -echo "Test create synthetic event with an error"
15359     -echo 'wakeup_latency u64 lat pid_t pid char' > synthetic_events > /dev/null
15360     +echo "Test remove synthetic event"
15361     +echo '!wakeup_latency u64 lat pid_t pid char comm[16]' >> synthetic_events
15362     if [ -d events/synthetic/wakeup_latency ]; then
15363     - fail "Created wakeup_latency synthetic event with an invalid format"
15364     + fail "Failed to delete wakeup_latency synthetic event"
15365     fi
15366    
15367     reset_trigger
15368    
15369     -echo "Test remove synthetic event"
15370     -echo '!wakeup_latency u64 lat pid_t pid char comm[16]' > synthetic_events
15371     +echo "Test create synthetic event with an error"
15372     +echo 'wakeup_latency u64 lat pid_t pid char' > synthetic_events > /dev/null
15373     if [ -d events/synthetic/wakeup_latency ]; then
15374     - fail "Failed to delete wakeup_latency synthetic event"
15375     + fail "Created wakeup_latency synthetic event with an invalid format"
15376     fi
15377    
15378     do_reset
15379     diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
15380     index 327fa943c7f3..dbdffa2e2c82 100644
15381     --- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
15382     +++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
15383     @@ -67,8 +67,8 @@ trans:
15384     "3: ;"
15385     : [res] "=r" (result), [texasr] "=r" (texasr)
15386     : [gpr_1]"i"(GPR_1), [gpr_2]"i"(GPR_2), [gpr_4]"i"(GPR_4),
15387     - [sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "r" (&a),
15388     - [flt_2] "r" (&b), [flt_4] "r" (&d)
15389     + [sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "b" (&a),
15390     + [flt_4] "b" (&d)
15391     : "memory", "r5", "r6", "r7",
15392     "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
15393     "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
15394     diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
15395     index c92053bc3f96..8fb31a7cc22c 100644
15396     --- a/virt/kvm/arm/arm.c
15397     +++ b/virt/kvm/arm/arm.c
15398     @@ -1295,8 +1295,6 @@ static void cpu_init_hyp_mode(void *dummy)
15399    
15400     __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
15401     __cpu_init_stage2();
15402     -
15403     - kvm_arm_init_debug();
15404     }
15405    
15406     static void cpu_hyp_reset(void)
15407     @@ -1320,6 +1318,8 @@ static void cpu_hyp_reinit(void)
15408     cpu_init_hyp_mode(NULL);
15409     }
15410    
15411     + kvm_arm_init_debug();
15412     +
15413     if (vgic_present)
15414     kvm_vgic_init_cpu_hardware();
15415     }
15416     diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
15417     index ed162a6c57c5..2f405b0be25c 100644
15418     --- a/virt/kvm/arm/mmu.c
15419     +++ b/virt/kvm/arm/mmu.c
15420     @@ -1230,8 +1230,14 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
15421     {
15422     kvm_pfn_t pfn = *pfnp;
15423     gfn_t gfn = *ipap >> PAGE_SHIFT;
15424     + struct page *page = pfn_to_page(pfn);
15425    
15426     - if (PageTransCompoundMap(pfn_to_page(pfn))) {
15427     + /*
15428     + * PageTransCompoundMap() returns true for THP and
15429     + * hugetlbfs. Make sure the adjustment is done only for THP
15430     + * pages.
15431     + */
15432     + if (!PageHuge(page) && PageTransCompoundMap(page)) {
15433     unsigned long mask;
15434     /*
15435     * The address we faulted on is backed by a transparent huge