Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.19/0111-4.19.12-all-fixes.patch

Parent Directory | Revision Log


Revision 3390 - (hide annotations) (download)
Fri Aug 2 11:47:25 2019 UTC (5 years, 1 month ago) by niro
File size: 78737 byte(s)
-linux-4.19.12
1 niro 3390 diff --git a/Makefile b/Makefile
2     index 676155d4dc3e..9770f29a690a 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 19
9     -SUBLEVEL = 11
10     +SUBLEVEL = 12
11     EXTRAVERSION =
12     NAME = "People's Front"
13    
14     diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
15     index c22b181e8206..2f39d9b3886e 100644
16     --- a/arch/arc/include/asm/io.h
17     +++ b/arch/arc/include/asm/io.h
18     @@ -12,6 +12,7 @@
19     #include <linux/types.h>
20     #include <asm/byteorder.h>
21     #include <asm/page.h>
22     +#include <asm/unaligned.h>
23    
24     #ifdef CONFIG_ISA_ARCV2
25     #include <asm/barrier.h>
26     @@ -94,6 +95,42 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
27     return w;
28     }
29    
30     +/*
31     + * {read,write}s{b,w,l}() repeatedly access the same IO address in
32     + * native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
33     + * @count times
34     + */
35     +#define __raw_readsx(t,f) \
36     +static inline void __raw_reads##f(const volatile void __iomem *addr, \
37     + void *ptr, unsigned int count) \
38     +{ \
39     + bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
40     + u##t *buf = ptr; \
41     + \
42     + if (!count) \
43     + return; \
44     + \
45     + /* Some ARC CPU's don't support unaligned accesses */ \
46     + if (is_aligned) { \
47     + do { \
48     + u##t x = __raw_read##f(addr); \
49     + *buf++ = x; \
50     + } while (--count); \
51     + } else { \
52     + do { \
53     + u##t x = __raw_read##f(addr); \
54     + put_unaligned(x, buf++); \
55     + } while (--count); \
56     + } \
57     +}
58     +
59     +#define __raw_readsb __raw_readsb
60     +__raw_readsx(8, b)
61     +#define __raw_readsw __raw_readsw
62     +__raw_readsx(16, w)
63     +#define __raw_readsl __raw_readsl
64     +__raw_readsx(32, l)
65     +
66     #define __raw_writeb __raw_writeb
67     static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
68     {
69     @@ -126,6 +163,35 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
70    
71     }
72    
73     +#define __raw_writesx(t,f) \
74     +static inline void __raw_writes##f(volatile void __iomem *addr, \
75     + const void *ptr, unsigned int count) \
76     +{ \
77     + bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
78     + const u##t *buf = ptr; \
79     + \
80     + if (!count) \
81     + return; \
82     + \
83     + /* Some ARC CPU's don't support unaligned accesses */ \
84     + if (is_aligned) { \
85     + do { \
86     + __raw_write##f(*buf++, addr); \
87     + } while (--count); \
88     + } else { \
89     + do { \
90     + __raw_write##f(get_unaligned(buf++), addr); \
91     + } while (--count); \
92     + } \
93     +}
94     +
95     +#define __raw_writesb __raw_writesb
96     +__raw_writesx(8, b)
97     +#define __raw_writesw __raw_writesw
98     +__raw_writesx(16, w)
99     +#define __raw_writesl __raw_writesl
100     +__raw_writesx(32, l)
101     +
102     /*
103     * MMIO can also get buffered/optimized in micro-arch, so barriers needed
104     * Based on ARM model for the typical use case
105     @@ -141,10 +207,16 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
106     #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
107     #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
108     #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
109     +#define readsb(p,d,l) ({ __raw_readsb(p,d,l); __iormb(); })
110     +#define readsw(p,d,l) ({ __raw_readsw(p,d,l); __iormb(); })
111     +#define readsl(p,d,l) ({ __raw_readsl(p,d,l); __iormb(); })
112    
113     #define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); })
114     #define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
115     #define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
116     +#define writesb(p,d,l) ({ __iowmb(); __raw_writesb(p,d,l); })
117     +#define writesw(p,d,l) ({ __iowmb(); __raw_writesw(p,d,l); })
118     +#define writesl(p,d,l) ({ __iowmb(); __raw_writesl(p,d,l); })
119    
120     /*
121     * Relaxed API for drivers which can handle barrier ordering themselves
122     diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
123     index 215df435bfb9..2149b47a0c5a 100644
124     --- a/arch/arm/mm/cache-v7.S
125     +++ b/arch/arm/mm/cache-v7.S
126     @@ -360,14 +360,16 @@ v7_dma_inv_range:
127     ALT_UP(W(nop))
128     #endif
129     mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
130     + addne r0, r0, r2
131    
132     tst r1, r3
133     bic r1, r1, r3
134     mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line
135     -1:
136     - mcr p15, 0, r0, c7, c6, 1 @ invalidate D / U line
137     - add r0, r0, r2
138     cmp r0, r1
139     +1:
140     + mcrlo p15, 0, r0, c7, c6, 1 @ invalidate D / U line
141     + addlo r0, r0, r2
142     + cmplo r0, r1
143     blo 1b
144     dsb st
145     ret lr
146     diff --git a/arch/arm/mm/cache-v7m.S b/arch/arm/mm/cache-v7m.S
147     index 788486e830d3..32aa2a2aa260 100644
148     --- a/arch/arm/mm/cache-v7m.S
149     +++ b/arch/arm/mm/cache-v7m.S
150     @@ -73,9 +73,11 @@
151     /*
152     * dcimvac: Invalidate data cache line by MVA to PoC
153     */
154     -.macro dcimvac, rt, tmp
155     - v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC
156     +.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
157     +.macro dcimvac\c, rt, tmp
158     + v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
159     .endm
160     +.endr
161    
162     /*
163     * dccmvau: Clean data cache line by MVA to PoU
164     @@ -369,14 +371,16 @@ v7m_dma_inv_range:
165     tst r0, r3
166     bic r0, r0, r3
167     dccimvacne r0, r3
168     + addne r0, r0, r2
169     subne r3, r2, #1 @ restore r3, corrupted by v7m's dccimvac
170     tst r1, r3
171     bic r1, r1, r3
172     dccimvacne r1, r3
173     -1:
174     - dcimvac r0, r3
175     - add r0, r0, r2
176     cmp r0, r1
177     +1:
178     + dcimvaclo r0, r3
179     + addlo r0, r0, r2
180     + cmplo r0, r1
181     blo 1b
182     dsb st
183     ret lr
184     diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
185     index 66566472c153..1cb9c0f9b5d6 100644
186     --- a/arch/arm/mm/dma-mapping.c
187     +++ b/arch/arm/mm/dma-mapping.c
188     @@ -830,7 +830,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
189     void *cpu_addr, dma_addr_t dma_addr, size_t size,
190     unsigned long attrs)
191     {
192     - int ret;
193     + int ret = -ENXIO;
194     unsigned long nr_vma_pages = vma_pages(vma);
195     unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
196     unsigned long pfn = dma_to_pfn(dev, dma_addr);
197     diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
198     index 3e70bed8a978..055c60a05756 100644
199     --- a/arch/x86/include/asm/qspinlock.h
200     +++ b/arch/x86/include/asm/qspinlock.h
201     @@ -6,9 +6,30 @@
202     #include <asm/cpufeature.h>
203     #include <asm-generic/qspinlock_types.h>
204     #include <asm/paravirt.h>
205     +#include <asm/rmwcc.h>
206    
207     #define _Q_PENDING_LOOPS (1 << 9)
208    
209     +#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
210     +
211     +static __always_inline bool __queued_RMW_btsl(struct qspinlock *lock)
212     +{
213     + GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter,
214     + "I", _Q_PENDING_OFFSET, "%0", c);
215     +}
216     +
217     +static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
218     +{
219     + u32 val = 0;
220     +
221     + if (__queued_RMW_btsl(lock))
222     + val |= _Q_PENDING_VAL;
223     +
224     + val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;
225     +
226     + return val;
227     +}
228     +
229     #ifdef CONFIG_PARAVIRT_SPINLOCKS
230     extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
231     extern void __pv_init_lock_hash(void);
232     diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c
233     index 5fdacb322ceb..c3e6be110b7d 100644
234     --- a/arch/x86/platform/efi/early_printk.c
235     +++ b/arch/x86/platform/efi/early_printk.c
236     @@ -179,7 +179,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
237     num--;
238     }
239    
240     - if (efi_x >= si->lfb_width) {
241     + if (efi_x + font->width > si->lfb_width) {
242     efi_x = 0;
243     efi_y += font->height;
244     }
245     diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
246     index c5d15752dfb3..75b331f8a16a 100644
247     --- a/drivers/acpi/nfit/core.c
248     +++ b/drivers/acpi/nfit/core.c
249     @@ -1303,7 +1303,7 @@ static ssize_t scrub_store(struct device *dev,
250     if (nd_desc) {
251     struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
252    
253     - rc = acpi_nfit_ars_rescan(acpi_desc, 0);
254     + rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
255     }
256     device_unlock(dev);
257     if (rc)
258     diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
259     index a7f5202a4815..b8c3f9e6af89 100644
260     --- a/drivers/ata/libata-core.c
261     +++ b/drivers/ata/libata-core.c
262     @@ -4602,6 +4602,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
263     { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
264     { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
265     { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
266     + { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
267     { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
268    
269     /*
270     diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c
271     index ad8d483a35cd..ca7d37e2c7be 100644
272     --- a/drivers/clk/mmp/clk.c
273     +++ b/drivers/clk/mmp/clk.c
274     @@ -183,7 +183,7 @@ void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
275     pr_err("CLK %d has invalid pointer %p\n", id, clk);
276     return;
277     }
278     - if (id > unit->nr_clks) {
279     + if (id >= unit->nr_clks) {
280     pr_err("CLK %d is invalid\n", id);
281     return;
282     }
283     diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
284     index 75bf7b8f282f..0153c76d4a20 100644
285     --- a/drivers/clk/mvebu/cp110-system-controller.c
286     +++ b/drivers/clk/mvebu/cp110-system-controller.c
287     @@ -202,11 +202,11 @@ static struct clk_hw *cp110_of_clk_get(struct of_phandle_args *clkspec,
288     unsigned int idx = clkspec->args[1];
289    
290     if (type == CP110_CLK_TYPE_CORE) {
291     - if (idx > CP110_MAX_CORE_CLOCKS)
292     + if (idx >= CP110_MAX_CORE_CLOCKS)
293     return ERR_PTR(-EINVAL);
294     return clk_data->hws[idx];
295     } else if (type == CP110_CLK_TYPE_GATABLE) {
296     - if (idx > CP110_MAX_GATABLE_CLOCKS)
297     + if (idx >= CP110_MAX_GATABLE_CLOCKS)
298     return ERR_PTR(-EINVAL);
299     return clk_data->hws[CP110_MAX_CORE_CLOCKS + idx];
300     }
301     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
302     index bd98cc5fb97b..fd825d30edf1 100644
303     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
304     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
305     @@ -292,9 +292,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
306     if (!info->return_size || !info->return_pointer)
307     return -EINVAL;
308    
309     - /* Ensure IB tests are run on ring */
310     - flush_delayed_work(&adev->late_init_work);
311     -
312     switch (info->query) {
313     case AMDGPU_INFO_ACCEL_WORKING:
314     ui32 = adev->accel_working;
315     @@ -861,6 +858,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
316     struct amdgpu_fpriv *fpriv;
317     int r, pasid;
318    
319     + /* Ensure IB tests are run on ring */
320     + flush_delayed_work(&adev->late_init_work);
321     +
322     file_priv->driver_priv = NULL;
323    
324     r = pm_runtime_get_sync(dev->dev);
325     diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
326     index 072371ef5975..4f8f3bb21832 100644
327     --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
328     +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
329     @@ -43,6 +43,7 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
330     static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
331     static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
332     static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
333     +static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
334    
335     /**
336     * vcn_v1_0_early_init - set function pointers
337     @@ -216,7 +217,7 @@ static int vcn_v1_0_hw_fini(void *handle)
338     struct amdgpu_ring *ring = &adev->vcn.ring_dec;
339    
340     if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
341     - vcn_v1_0_stop(adev);
342     + vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
343    
344     ring->ready = false;
345    
346     diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
347     index b2f308766a9e..0941f3c689bc 100644
348     --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
349     +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
350     @@ -2530,6 +2530,8 @@ static void pplib_apply_display_requirements(
351     dc,
352     context->bw.dce.sclk_khz);
353    
354     + pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz;
355     +
356     pp_display_cfg->min_engine_clock_deep_sleep_khz
357     = context->bw.dce.sclk_deep_sleep_khz;
358    
359     diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
360     index 8994aa5c8cf8..64596029b696 100644
361     --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
362     +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
363     @@ -365,6 +365,9 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
364    
365     switch (task_id) {
366     case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
367     + ret = phm_pre_display_configuration_changed(hwmgr);
368     + if (ret)
369     + return ret;
370     ret = phm_set_cpu_power_state(hwmgr);
371     if (ret)
372     return ret;
373     diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
374     index 91ffb7bc4ee7..56437866d120 100644
375     --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
376     +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
377     @@ -265,8 +265,6 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
378     if (skip)
379     return 0;
380    
381     - phm_pre_display_configuration_changed(hwmgr);
382     -
383     phm_display_configuration_changed(hwmgr);
384    
385     if (hwmgr->ps)
386     diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
387     index 0cd827e11fa2..de26df0c6044 100644
388     --- a/drivers/gpu/drm/ast/ast_fb.c
389     +++ b/drivers/gpu/drm/ast/ast_fb.c
390     @@ -263,6 +263,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
391     {
392     struct ast_framebuffer *afb = &afbdev->afb;
393    
394     + drm_crtc_force_disable_all(dev);
395     drm_fb_helper_unregister_fbi(&afbdev->helper);
396    
397     if (afb->obj) {
398     diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
399     index cd02eae884cc..4752f08f0884 100644
400     --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
401     +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
402     @@ -2122,7 +2122,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
403     NULL);
404    
405     drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
406     - plane->crtc = crtc;
407    
408     /* save user friendly CRTC name for later */
409     snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
410     diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
411     index 1b4de3486ef9..ec3fd67378c1 100644
412     --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
413     +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
414     @@ -503,8 +503,6 @@ static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
415    
416     drm_encoder_cleanup(drm_enc);
417     mutex_destroy(&dpu_enc->enc_lock);
418     -
419     - kfree(dpu_enc);
420     }
421    
422     void dpu_encoder_helper_split_config(
423     diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
424     index 4c03f0b7343e..41bec570c518 100644
425     --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
426     +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
427     @@ -39,6 +39,8 @@
428     #define DSI_PIXEL_PLL_CLK 1
429     #define NUM_PROVIDED_CLKS 2
430    
431     +#define VCO_REF_CLK_RATE 19200000
432     +
433     struct dsi_pll_regs {
434     u32 pll_prop_gain_rate;
435     u32 pll_lockdet_rate;
436     @@ -316,7 +318,7 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
437     parent_rate);
438    
439     pll_10nm->vco_current_rate = rate;
440     - pll_10nm->vco_ref_clk_rate = parent_rate;
441     + pll_10nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
442    
443     dsi_pll_setup_config(pll_10nm);
444    
445     diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
446     index c79659ca5706..33e083f71a17 100644
447     --- a/drivers/gpu/drm/msm/hdmi/hdmi.c
448     +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
449     @@ -332,6 +332,12 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
450     goto fail;
451     }
452    
453     + ret = msm_hdmi_hpd_enable(hdmi->connector);
454     + if (ret < 0) {
455     + DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
456     + goto fail;
457     + }
458     +
459     encoder->bridge = hdmi->bridge;
460    
461     priv->bridges[priv->num_bridges++] = hdmi->bridge;
462     diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
463     index accc9a61611d..5c5df6ab2a57 100644
464     --- a/drivers/gpu/drm/msm/hdmi/hdmi.h
465     +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
466     @@ -245,6 +245,7 @@ void msm_hdmi_bridge_destroy(struct drm_bridge *bridge);
467    
468     void msm_hdmi_connector_irq(struct drm_connector *connector);
469     struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi);
470     +int msm_hdmi_hpd_enable(struct drm_connector *connector);
471    
472     /*
473     * i2c adapter for ddc:
474     diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
475     index e9c9a0af508e..30e908dfded7 100644
476     --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
477     +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
478     @@ -167,8 +167,9 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
479     }
480     }
481    
482     -static int hpd_enable(struct hdmi_connector *hdmi_connector)
483     +int msm_hdmi_hpd_enable(struct drm_connector *connector)
484     {
485     + struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
486     struct hdmi *hdmi = hdmi_connector->hdmi;
487     const struct hdmi_platform_config *config = hdmi->config;
488     struct device *dev = &hdmi->pdev->dev;
489     @@ -450,7 +451,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
490     {
491     struct drm_connector *connector = NULL;
492     struct hdmi_connector *hdmi_connector;
493     - int ret;
494    
495     hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
496     if (!hdmi_connector)
497     @@ -471,12 +471,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
498     connector->interlace_allowed = 0;
499     connector->doublescan_allowed = 0;
500    
501     - ret = hpd_enable(hdmi_connector);
502     - if (ret) {
503     - dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
504     - return ERR_PTR(ret);
505     - }
506     -
507     drm_connector_attach_encoder(connector, hdmi->encoder);
508    
509     return connector;
510     diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
511     index c1f1779c980f..2b7bb6e166d3 100644
512     --- a/drivers/gpu/drm/msm/msm_atomic.c
513     +++ b/drivers/gpu/drm/msm/msm_atomic.c
514     @@ -32,7 +32,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
515     if (!new_crtc_state->active)
516     continue;
517    
518     + if (drm_crtc_vblank_get(crtc))
519     + continue;
520     +
521     kms->funcs->wait_for_crtc_commit_done(kms, crtc);
522     +
523     + drm_crtc_vblank_put(crtc);
524     }
525     }
526    
527     diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
528     index f0da0d3c8a80..d756436c1fcd 100644
529     --- a/drivers/gpu/drm/msm/msm_debugfs.c
530     +++ b/drivers/gpu/drm/msm/msm_debugfs.c
531     @@ -84,7 +84,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
532    
533     ret = mutex_lock_interruptible(&dev->struct_mutex);
534     if (ret)
535     - return ret;
536     + goto free_priv;
537    
538     pm_runtime_get_sync(&gpu->pdev->dev);
539     show_priv->state = gpu->funcs->gpu_state_get(gpu);
540     @@ -94,13 +94,20 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
541    
542     if (IS_ERR(show_priv->state)) {
543     ret = PTR_ERR(show_priv->state);
544     - kfree(show_priv);
545     - return ret;
546     + goto free_priv;
547     }
548    
549     show_priv->dev = dev;
550    
551     - return single_open(file, msm_gpu_show, show_priv);
552     + ret = single_open(file, msm_gpu_show, show_priv);
553     + if (ret)
554     + goto free_priv;
555     +
556     + return 0;
557     +
558     +free_priv:
559     + kfree(show_priv);
560     + return ret;
561     }
562    
563     static const struct file_operations msm_gpu_fops = {
564     diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
565     index 46e6b82f7b66..52a2146dc1f2 100644
566     --- a/drivers/gpu/drm/msm/msm_gpu.c
567     +++ b/drivers/gpu/drm/msm/msm_gpu.c
568     @@ -425,10 +425,9 @@ static void recover_worker(struct work_struct *work)
569     if (submit) {
570     struct task_struct *task;
571    
572     - rcu_read_lock();
573     - task = pid_task(submit->pid, PIDTYPE_PID);
574     + task = get_pid_task(submit->pid, PIDTYPE_PID);
575     if (task) {
576     - comm = kstrdup(task->comm, GFP_ATOMIC);
577     + comm = kstrdup(task->comm, GFP_KERNEL);
578    
579     /*
580     * So slightly annoying, in other paths like
581     @@ -441,10 +440,10 @@ static void recover_worker(struct work_struct *work)
582     * about the submit going away.
583     */
584     mutex_unlock(&dev->struct_mutex);
585     - cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC);
586     + cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
587     + put_task_struct(task);
588     mutex_lock(&dev->struct_mutex);
589     }
590     - rcu_read_unlock();
591    
592     if (comm && cmd) {
593     dev_err(dev->dev, "%s: offending task: %s (%s)\n",
594     diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
595     index b23d33622f37..2a90aa4caec0 100644
596     --- a/drivers/gpu/drm/msm/msm_iommu.c
597     +++ b/drivers/gpu/drm/msm/msm_iommu.c
598     @@ -66,7 +66,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
599     // pm_runtime_get_sync(mmu->dev);
600     ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
601     // pm_runtime_put_sync(mmu->dev);
602     - WARN_ON(ret < 0);
603     + WARN_ON(!ret);
604    
605     return (ret == len) ? 0 : -EINVAL;
606     }
607     diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
608     index 3aa8a8576abe..f7a0edea4705 100644
609     --- a/drivers/gpu/drm/msm/msm_rd.c
610     +++ b/drivers/gpu/drm/msm/msm_rd.c
611     @@ -316,10 +316,11 @@ static void snapshot_buf(struct msm_rd_state *rd,
612     uint64_t iova, uint32_t size)
613     {
614     struct msm_gem_object *obj = submit->bos[idx].obj;
615     + unsigned offset = 0;
616     const char *buf;
617    
618     if (iova) {
619     - buf += iova - submit->bos[idx].iova;
620     + offset = iova - submit->bos[idx].iova;
621     } else {
622     iova = submit->bos[idx].iova;
623     size = obj->base.size;
624     @@ -340,6 +341,8 @@ static void snapshot_buf(struct msm_rd_state *rd,
625     if (IS_ERR(buf))
626     return;
627    
628     + buf += offset;
629     +
630     rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
631    
632     msm_gem_put_vaddr(&obj->base);
633     diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
634     index 046a6dda690a..40904e84f883 100644
635     --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
636     +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
637     @@ -492,8 +492,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
638     if (!fbo)
639     return -ENOMEM;
640    
641     - ttm_bo_get(bo);
642     fbo->base = *bo;
643     + fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
644     +
645     + ttm_bo_get(bo);
646     fbo->bo = bo;
647    
648     /**
649     diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
650     index b372854cf38d..704049e62d58 100644
651     --- a/drivers/hid/hid-hyperv.c
652     +++ b/drivers/hid/hid-hyperv.c
653     @@ -309,7 +309,7 @@ static void mousevsc_on_receive(struct hv_device *device,
654     hid_input_report(input_dev->hid_device, HID_INPUT_REPORT,
655     input_dev->input_buf, len, 1);
656    
657     - pm_wakeup_event(&input_dev->device->device, 0);
658     + pm_wakeup_hard_event(&input_dev->device->device);
659    
660     break;
661     default:
662     diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
663     index 8e60048a33f8..51d34959709b 100644
664     --- a/drivers/i2c/busses/i2c-axxia.c
665     +++ b/drivers/i2c/busses/i2c-axxia.c
666     @@ -74,8 +74,7 @@
667     MST_STATUS_ND)
668     #define MST_STATUS_ERR (MST_STATUS_NAK | \
669     MST_STATUS_AL | \
670     - MST_STATUS_IP | \
671     - MST_STATUS_TSS)
672     + MST_STATUS_IP)
673     #define MST_TX_BYTES_XFRD 0x50
674     #define MST_RX_BYTES_XFRD 0x54
675     #define SCL_HIGH_PERIOD 0x80
676     @@ -241,7 +240,7 @@ static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev)
677     */
678     if (c <= 0 || c > I2C_SMBUS_BLOCK_MAX) {
679     idev->msg_err = -EPROTO;
680     - i2c_int_disable(idev, ~0);
681     + i2c_int_disable(idev, ~MST_STATUS_TSS);
682     complete(&idev->msg_complete);
683     break;
684     }
685     @@ -299,14 +298,19 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
686    
687     if (status & MST_STATUS_SCC) {
688     /* Stop completed */
689     - i2c_int_disable(idev, ~0);
690     + i2c_int_disable(idev, ~MST_STATUS_TSS);
691     complete(&idev->msg_complete);
692     } else if (status & MST_STATUS_SNS) {
693     /* Transfer done */
694     - i2c_int_disable(idev, ~0);
695     + i2c_int_disable(idev, ~MST_STATUS_TSS);
696     if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
697     axxia_i2c_empty_rx_fifo(idev);
698     complete(&idev->msg_complete);
699     + } else if (status & MST_STATUS_TSS) {
700     + /* Transfer timeout */
701     + idev->msg_err = -ETIMEDOUT;
702     + i2c_int_disable(idev, ~MST_STATUS_TSS);
703     + complete(&idev->msg_complete);
704     } else if (unlikely(status & MST_STATUS_ERR)) {
705     /* Transfer error */
706     i2c_int_disable(idev, ~0);
707     @@ -339,10 +343,10 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
708     u32 rx_xfer, tx_xfer;
709     u32 addr_1, addr_2;
710     unsigned long time_left;
711     + unsigned int wt_value;
712    
713     idev->msg = msg;
714     idev->msg_xfrd = 0;
715     - idev->msg_err = 0;
716     reinit_completion(&idev->msg_complete);
717    
718     if (i2c_m_ten(msg)) {
719     @@ -383,9 +387,18 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
720     else if (axxia_i2c_fill_tx_fifo(idev) != 0)
721     int_mask |= MST_STATUS_TFL;
722    
723     + wt_value = WT_VALUE(readl(idev->base + WAIT_TIMER_CONTROL));
724     + /* Disable wait timer temporarly */
725     + writel(wt_value, idev->base + WAIT_TIMER_CONTROL);
726     + /* Check if timeout error happened */
727     + if (idev->msg_err)
728     + goto out;
729     +
730     /* Start manual mode */
731     writel(CMD_MANUAL, idev->base + MST_COMMAND);
732    
733     + writel(WT_EN | wt_value, idev->base + WAIT_TIMER_CONTROL);
734     +
735     i2c_int_enable(idev, int_mask);
736    
737     time_left = wait_for_completion_timeout(&idev->msg_complete,
738     @@ -396,13 +409,15 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
739     if (readl(idev->base + MST_COMMAND) & CMD_BUSY)
740     dev_warn(idev->dev, "busy after xfer\n");
741    
742     - if (time_left == 0)
743     + if (time_left == 0) {
744     idev->msg_err = -ETIMEDOUT;
745     -
746     - if (idev->msg_err == -ETIMEDOUT)
747     i2c_recover_bus(&idev->adapter);
748     + axxia_i2c_init(idev);
749     + }
750    
751     - if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO)
752     +out:
753     + if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO &&
754     + idev->msg_err != -ETIMEDOUT)
755     axxia_i2c_init(idev);
756    
757     return idev->msg_err;
758     @@ -410,7 +425,7 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
759    
760     static int axxia_i2c_stop(struct axxia_i2c_dev *idev)
761     {
762     - u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC;
763     + u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC | MST_STATUS_TSS;
764     unsigned long time_left;
765    
766     reinit_completion(&idev->msg_complete);
767     @@ -437,6 +452,9 @@ axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
768     int i;
769     int ret = 0;
770    
771     + idev->msg_err = 0;
772     + i2c_int_enable(idev, MST_STATUS_TSS);
773     +
774     for (i = 0; ret == 0 && i < num; ++i)
775     ret = axxia_i2c_xfer_msg(idev, &msgs[i]);
776    
777     diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
778     index 4aa7dde876f3..254e6219e538 100644
779     --- a/drivers/i2c/busses/i2c-rcar.c
780     +++ b/drivers/i2c/busses/i2c-rcar.c
781     @@ -779,6 +779,11 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
782    
783     pm_runtime_get_sync(dev);
784    
785     + /* Check bus state before init otherwise bus busy info will be lost */
786     + ret = rcar_i2c_bus_barrier(priv);
787     + if (ret < 0)
788     + goto out;
789     +
790     /* Gen3 needs a reset before allowing RXDMA once */
791     if (priv->devtype == I2C_RCAR_GEN3) {
792     priv->flags |= ID_P_NO_RXDMA;
793     @@ -791,10 +796,6 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
794    
795     rcar_i2c_init(priv);
796    
797     - ret = rcar_i2c_bus_barrier(priv);
798     - if (ret < 0)
799     - goto out;
800     -
801     for (i = 0; i < num; i++)
802     rcar_i2c_request_dma(priv, msgs + i);
803    
804     diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
805     index 7e9a2bbf5ddc..ff3f4553648f 100644
806     --- a/drivers/i2c/busses/i2c-scmi.c
807     +++ b/drivers/i2c/busses/i2c-scmi.c
808     @@ -367,6 +367,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
809     {
810     struct acpi_smbus_cmi *smbus_cmi;
811     const struct acpi_device_id *id;
812     + int ret;
813    
814     smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL);
815     if (!smbus_cmi)
816     @@ -388,8 +389,10 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
817     acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1,
818     acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL);
819    
820     - if (smbus_cmi->cap_info == 0)
821     + if (smbus_cmi->cap_info == 0) {
822     + ret = -ENODEV;
823     goto err;
824     + }
825    
826     snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name),
827     "SMBus CMI adapter %s",
828     @@ -400,7 +403,8 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
829     smbus_cmi->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
830     smbus_cmi->adapter.dev.parent = &device->dev;
831    
832     - if (i2c_add_adapter(&smbus_cmi->adapter)) {
833     + ret = i2c_add_adapter(&smbus_cmi->adapter);
834     + if (ret) {
835     dev_err(&device->dev, "Couldn't register adapter!\n");
836     goto err;
837     }
838     @@ -410,7 +414,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
839     err:
840     kfree(smbus_cmi);
841     device->driver_data = NULL;
842     - return -EIO;
843     + return ret;
844     }
845    
846     static int acpi_smbus_cmi_remove(struct acpi_device *device)
847     diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
848     index a403e8579b65..bc26ec822e26 100644
849     --- a/drivers/i2c/busses/i2c-uniphier-f.c
850     +++ b/drivers/i2c/busses/i2c-uniphier-f.c
851     @@ -470,9 +470,26 @@ static void uniphier_fi2c_hw_init(struct uniphier_fi2c_priv *priv)
852    
853     uniphier_fi2c_reset(priv);
854    
855     + /*
856     + * Standard-mode: tLOW + tHIGH = 10 us
857     + * Fast-mode: tLOW + tHIGH = 2.5 us
858     + */
859     writel(cyc, priv->membase + UNIPHIER_FI2C_CYC);
860     - writel(cyc / 2, priv->membase + UNIPHIER_FI2C_LCTL);
861     + /*
862     + * Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us, tBUF = 4.7 us
863     + * Fast-mode: tLOW = 1.3 us, tHIGH = 0.6 us, tBUF = 1.3 us
864     + * "tLow/tHIGH = 5/4" meets both.
865     + */
866     + writel(cyc * 5 / 9, priv->membase + UNIPHIER_FI2C_LCTL);
867     + /*
868     + * Standard-mode: tHD;STA = 4.0 us, tSU;STA = 4.7 us, tSU;STO = 4.0 us
869     + * Fast-mode: tHD;STA = 0.6 us, tSU;STA = 0.6 us, tSU;STO = 0.6 us
870     + */
871     writel(cyc / 2, priv->membase + UNIPHIER_FI2C_SSUT);
872     + /*
873     + * Standard-mode: tSU;DAT = 250 ns
874     + * Fast-mode: tSU;DAT = 100 ns
875     + */
876     writel(cyc / 16, priv->membase + UNIPHIER_FI2C_DSUT);
877    
878     uniphier_fi2c_prepare_operation(priv);
879     diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
880     index 454f914ae66d..c488e558aef7 100644
881     --- a/drivers/i2c/busses/i2c-uniphier.c
882     +++ b/drivers/i2c/busses/i2c-uniphier.c
883     @@ -320,7 +320,13 @@ static void uniphier_i2c_hw_init(struct uniphier_i2c_priv *priv)
884    
885     uniphier_i2c_reset(priv, true);
886    
887     - writel((cyc / 2 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK);
888     + /*
889     + * Bit30-16: clock cycles of tLOW.
890     + * Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us
891     + * Fast-mode: tLOW = 1.3 us, tHIGH = 0.6 us
892     + * "tLow/tHIGH = 5/4" meets both.
893     + */
894     + writel((cyc * 5 / 9 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK);
895    
896     uniphier_i2c_reset(priv, false);
897     }
898     diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
899     index c5b902b86b44..203ed4adc04a 100644
900     --- a/drivers/ide/pmac.c
901     +++ b/drivers/ide/pmac.c
902     @@ -920,6 +920,7 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
903     struct device_node *root = of_find_node_by_path("/");
904     const char *model = of_get_property(root, "model", NULL);
905    
906     + of_node_put(root);
907     /* Get cable type from device-tree. */
908     if (cable && !strncmp(cable, "80-", 3)) {
909     /* Some drives fail to detect 80c cable in PowerBook */
910     diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
911     index 39134dd305f5..51831bfbf90f 100644
912     --- a/drivers/infiniband/hw/hfi1/user_sdma.c
913     +++ b/drivers/infiniband/hw/hfi1/user_sdma.c
914     @@ -187,7 +187,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
915     pq->ctxt = uctxt->ctxt;
916     pq->subctxt = fd->subctxt;
917     pq->n_max_reqs = hfi1_sdma_comp_ring_size;
918     - pq->state = SDMA_PKT_Q_INACTIVE;
919     atomic_set(&pq->n_reqs, 0);
920     init_waitqueue_head(&pq->wait);
921     atomic_set(&pq->n_locked, 0);
922     @@ -276,7 +275,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
923     /* Wait until all requests have been freed. */
924     wait_event_interruptible(
925     pq->wait,
926     - (READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
927     + !atomic_read(&pq->n_reqs));
928     kfree(pq->reqs);
929     kfree(pq->req_in_use);
930     kmem_cache_destroy(pq->txreq_cache);
931     @@ -312,6 +311,13 @@ static u8 dlid_to_selector(u16 dlid)
932     return mapping[hash];
933     }
934    
935     +/**
936     + * hfi1_user_sdma_process_request() - Process and start a user sdma request
937     + * @fd: valid file descriptor
938     + * @iovec: array of io vectors to process
939     + * @dim: overall iovec array size
940     + * @count: number of io vector array entries processed
941     + */
942     int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
943     struct iovec *iovec, unsigned long dim,
944     unsigned long *count)
945     @@ -560,20 +566,12 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
946     req->ahg_idx = sdma_ahg_alloc(req->sde);
947    
948     set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
949     + pq->state = SDMA_PKT_Q_ACTIVE;
950     /* Send the first N packets in the request to buy us some time */
951     ret = user_sdma_send_pkts(req, pcount);
952     if (unlikely(ret < 0 && ret != -EBUSY))
953     goto free_req;
954    
955     - /*
956     - * It is possible that the SDMA engine would have processed all the
957     - * submitted packets by the time we get here. Therefore, only set
958     - * packet queue state to ACTIVE if there are still uncompleted
959     - * requests.
960     - */
961     - if (atomic_read(&pq->n_reqs))
962     - xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
963     -
964     /*
965     * This is a somewhat blocking send implementation.
966     * The driver will block the caller until all packets of the
967     @@ -1409,10 +1407,8 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
968    
969     static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
970     {
971     - if (atomic_dec_and_test(&pq->n_reqs)) {
972     - xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
973     + if (atomic_dec_and_test(&pq->n_reqs))
974     wake_up(&pq->wait);
975     - }
976     }
977    
978     static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
979     diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
980     index 0ae06456c868..91c343f91776 100644
981     --- a/drivers/infiniband/hw/hfi1/user_sdma.h
982     +++ b/drivers/infiniband/hw/hfi1/user_sdma.h
983     @@ -105,9 +105,10 @@ static inline int ahg_header_set(u32 *arr, int idx, size_t array_size,
984     #define TXREQ_FLAGS_REQ_ACK BIT(0) /* Set the ACK bit in the header */
985     #define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */
986    
987     -#define SDMA_PKT_Q_INACTIVE BIT(0)
988     -#define SDMA_PKT_Q_ACTIVE BIT(1)
989     -#define SDMA_PKT_Q_DEFERRED BIT(2)
990     +enum pkt_q_sdma_state {
991     + SDMA_PKT_Q_ACTIVE,
992     + SDMA_PKT_Q_DEFERRED,
993     +};
994    
995     /*
996     * Maximum retry attempts to submit a TX request
997     @@ -133,7 +134,7 @@ struct hfi1_user_sdma_pkt_q {
998     struct user_sdma_request *reqs;
999     unsigned long *req_in_use;
1000     struct iowait busy;
1001     - unsigned state;
1002     + enum pkt_q_sdma_state state;
1003     wait_queue_head_t wait;
1004     unsigned long unpinned;
1005     struct mmu_rb_handler *handler;
1006     diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
1007     index 46406345742b..a7dc286f406c 100644
1008     --- a/drivers/input/keyboard/omap4-keypad.c
1009     +++ b/drivers/input/keyboard/omap4-keypad.c
1010     @@ -60,8 +60,18 @@
1011    
1012     /* OMAP4 values */
1013     #define OMAP4_VAL_IRQDISABLE 0x0
1014     -#define OMAP4_VAL_DEBOUNCINGTIME 0x7
1015     -#define OMAP4_VAL_PVT 0x7
1016     +
1017     +/*
1018     + * Errata i689: If a key is released for a time shorter than debounce time,
1019     + * the keyboard will idle and never detect the key release. The workaround
1020     + * is to use at least a 12ms debounce time. See omap5432 TRM chapter
1021     + * "26.4.6.2 Keyboard Controller Timer" for more information.
1022     + */
1023     +#define OMAP4_KEYPAD_PTV_DIV_128 0x6
1024     +#define OMAP4_KEYPAD_DEBOUNCINGTIME_MS(dbms, ptv) \
1025     + ((((dbms) * 1000) / ((1 << ((ptv) + 1)) * (1000000 / 32768))) - 1)
1026     +#define OMAP4_VAL_DEBOUNCINGTIME_16MS \
1027     + OMAP4_KEYPAD_DEBOUNCINGTIME_MS(16, OMAP4_KEYPAD_PTV_DIV_128)
1028    
1029     enum {
1030     KBD_REVISION_OMAP4 = 0,
1031     @@ -181,9 +191,9 @@ static int omap4_keypad_open(struct input_dev *input)
1032    
1033     kbd_writel(keypad_data, OMAP4_KBD_CTRL,
1034     OMAP4_DEF_CTRL_NOSOFTMODE |
1035     - (OMAP4_VAL_PVT << OMAP4_DEF_CTRL_PTV_SHIFT));
1036     + (OMAP4_KEYPAD_PTV_DIV_128 << OMAP4_DEF_CTRL_PTV_SHIFT));
1037     kbd_writel(keypad_data, OMAP4_KBD_DEBOUNCINGTIME,
1038     - OMAP4_VAL_DEBOUNCINGTIME);
1039     + OMAP4_VAL_DEBOUNCINGTIME_16MS);
1040     /* clear pending interrupts */
1041     kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
1042     kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
1043     diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
1044     index c42813d50591..2bd5bb11c8ba 100644
1045     --- a/drivers/input/mouse/synaptics.c
1046     +++ b/drivers/input/mouse/synaptics.c
1047     @@ -178,6 +178,7 @@ static const char * const smbus_pnp_ids[] = {
1048     "LEN0096", /* X280 */
1049     "LEN0097", /* X280 -> ALPS trackpoint */
1050     "LEN200f", /* T450s */
1051     + "SYN3221", /* HP 15-ay000 */
1052     NULL
1053     };
1054    
1055     diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
1056     index 47a0e81a2989..a8b9be3e28db 100644
1057     --- a/drivers/input/serio/hyperv-keyboard.c
1058     +++ b/drivers/input/serio/hyperv-keyboard.c
1059     @@ -177,7 +177,7 @@ static void hv_kbd_on_receive(struct hv_device *hv_dev,
1060     * state because the Enter-UP can trigger a wakeup at once.
1061     */
1062     if (!(info & IS_BREAK))
1063     - pm_wakeup_event(&hv_dev->device, 0);
1064     + pm_wakeup_hard_event(&hv_dev->device);
1065    
1066     break;
1067    
1068     diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
1069     index f43fb2f958a5..93dfcef8afc4 100644
1070     --- a/drivers/net/bonding/bond_3ad.c
1071     +++ b/drivers/net/bonding/bond_3ad.c
1072     @@ -2086,6 +2086,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
1073     aggregator->aggregator_identifier);
1074    
1075     /* Tell the partner that this port is not suitable for aggregation */
1076     + port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
1077     + port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
1078     + port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
1079     port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
1080     __update_lacpdu_from_port(port);
1081     ad_lacpdu_send(port);
1082     diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
1083     index 65f10fec25b3..0b3e51f248c2 100644
1084     --- a/drivers/net/dsa/mv88e6060.c
1085     +++ b/drivers/net/dsa/mv88e6060.c
1086     @@ -116,8 +116,7 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
1087     /* Reset the switch. */
1088     REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
1089     GLOBAL_ATU_CONTROL_SWRESET |
1090     - GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
1091     - GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
1092     + GLOBAL_ATU_CONTROL_LEARNDIS);
1093    
1094     /* Wait up to one second for reset to complete. */
1095     timeout = jiffies + 1 * HZ;
1096     @@ -142,13 +141,10 @@ static int mv88e6060_setup_global(struct dsa_switch *ds)
1097     */
1098     REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
1099    
1100     - /* Enable automatic address learning, set the address
1101     - * database size to 1024 entries, and set the default aging
1102     - * time to 5 minutes.
1103     + /* Disable automatic address learning.
1104     */
1105     REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
1106     - GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
1107     - GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
1108     + GLOBAL_ATU_CONTROL_LEARNDIS);
1109    
1110     return 0;
1111     }
1112     diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
1113     index 88705dee5b95..56363ff5c891 100644
1114     --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
1115     +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
1116     @@ -667,7 +667,7 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
1117    
1118     rx_stat = (0x0000003CU & rxd_wb->status) >> 2;
1119    
1120     - is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
1121     + is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U;
1122    
1123     pkt_type = 0xFFU & (rxd_wb->type >> 4);
1124    
1125     diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
1126     index ddd7431579f4..c99b59fe4c8f 100644
1127     --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
1128     +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
1129     @@ -367,13 +367,15 @@ lio_vf_rep_packet_sent_callback(struct octeon_device *oct,
1130     struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1131     struct sk_buff *skb = sc->ctxptr;
1132     struct net_device *ndev = skb->dev;
1133     + u32 iq_no;
1134    
1135     dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
1136     sc->datasize, DMA_TO_DEVICE);
1137     dev_kfree_skb_any(skb);
1138     + iq_no = sc->iq_no;
1139     octeon_free_soft_command(oct, sc);
1140    
1141     - if (octnet_iq_is_full(oct, sc->iq_no))
1142     + if (octnet_iq_is_full(oct, iq_no))
1143     return;
1144    
1145     if (netif_queue_stopped(ndev))
1146     diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
1147     index c415ac67cb7b..e80fedb27cee 100644
1148     --- a/drivers/net/ethernet/freescale/fman/fman.c
1149     +++ b/drivers/net/ethernet/freescale/fman/fman.c
1150     @@ -2786,7 +2786,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
1151     if (!muram_node) {
1152     dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
1153     __func__);
1154     - goto fman_node_put;
1155     + goto fman_free;
1156     }
1157    
1158     err = of_address_to_resource(muram_node, 0,
1159     @@ -2795,11 +2795,10 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
1160     of_node_put(muram_node);
1161     dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n",
1162     __func__, err);
1163     - goto fman_node_put;
1164     + goto fman_free;
1165     }
1166    
1167     of_node_put(muram_node);
1168     - of_node_put(fm_node);
1169    
1170     err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED,
1171     "fman", fman);
1172     diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1173     index a74002b43b51..6320e080b831 100644
1174     --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1175     +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
1176     @@ -4262,8 +4262,27 @@ static void mvpp2_phylink_validate(struct net_device *dev,
1177     unsigned long *supported,
1178     struct phylink_link_state *state)
1179     {
1180     + struct mvpp2_port *port = netdev_priv(dev);
1181     __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
1182    
1183     + /* Invalid combinations */
1184     + switch (state->interface) {
1185     + case PHY_INTERFACE_MODE_10GKR:
1186     + case PHY_INTERFACE_MODE_XAUI:
1187     + if (port->gop_id != 0)
1188     + goto empty_set;
1189     + break;
1190     + case PHY_INTERFACE_MODE_RGMII:
1191     + case PHY_INTERFACE_MODE_RGMII_ID:
1192     + case PHY_INTERFACE_MODE_RGMII_RXID:
1193     + case PHY_INTERFACE_MODE_RGMII_TXID:
1194     + if (port->gop_id == 0)
1195     + goto empty_set;
1196     + break;
1197     + default:
1198     + break;
1199     + }
1200     +
1201     phylink_set(mask, Autoneg);
1202     phylink_set_port_modes(mask);
1203     phylink_set(mask, Pause);
1204     @@ -4271,6 +4290,8 @@ static void mvpp2_phylink_validate(struct net_device *dev,
1205    
1206     switch (state->interface) {
1207     case PHY_INTERFACE_MODE_10GKR:
1208     + case PHY_INTERFACE_MODE_XAUI:
1209     + case PHY_INTERFACE_MODE_NA:
1210     phylink_set(mask, 10000baseCR_Full);
1211     phylink_set(mask, 10000baseSR_Full);
1212     phylink_set(mask, 10000baseLR_Full);
1213     @@ -4278,7 +4299,11 @@ static void mvpp2_phylink_validate(struct net_device *dev,
1214     phylink_set(mask, 10000baseER_Full);
1215     phylink_set(mask, 10000baseKR_Full);
1216     /* Fall-through */
1217     - default:
1218     + case PHY_INTERFACE_MODE_RGMII:
1219     + case PHY_INTERFACE_MODE_RGMII_ID:
1220     + case PHY_INTERFACE_MODE_RGMII_RXID:
1221     + case PHY_INTERFACE_MODE_RGMII_TXID:
1222     + case PHY_INTERFACE_MODE_SGMII:
1223     phylink_set(mask, 10baseT_Half);
1224     phylink_set(mask, 10baseT_Full);
1225     phylink_set(mask, 100baseT_Half);
1226     @@ -4290,11 +4315,18 @@ static void mvpp2_phylink_validate(struct net_device *dev,
1227     phylink_set(mask, 1000baseT_Full);
1228     phylink_set(mask, 1000baseX_Full);
1229     phylink_set(mask, 2500baseX_Full);
1230     + break;
1231     + default:
1232     + goto empty_set;
1233     }
1234    
1235     bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
1236     bitmap_and(state->advertising, state->advertising, mask,
1237     __ETHTOOL_LINK_MODE_MASK_NBITS);
1238     + return;
1239     +
1240     +empty_set:
1241     + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
1242     }
1243    
1244     static void mvpp22_xlg_link_state(struct mvpp2_port *port,
1245     diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
1246     index 36054e6fb9d3..f200b8c420d5 100644
1247     --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
1248     +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
1249     @@ -5,7 +5,7 @@
1250     config MLX4_EN
1251     tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
1252     depends on MAY_USE_DEVLINK
1253     - depends on PCI
1254     + depends on PCI && NETDEVICES && ETHERNET && INET
1255     select MLX4_CORE
1256     imply PTP_1588_CLOCK
1257     ---help---
1258     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1259     index 7715f1ed2bcb..4eb64cb0d9a1 100644
1260     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1261     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1262     @@ -286,7 +286,13 @@ static bool
1263     mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
1264     bridge_port)
1265     {
1266     - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
1267     + struct net_device *dev = bridge_port->dev;
1268     + struct mlxsw_sp *mlxsw_sp;
1269     +
1270     + if (is_vlan_dev(dev))
1271     + mlxsw_sp = mlxsw_sp_lower_get(vlan_dev_real_dev(dev));
1272     + else
1273     + mlxsw_sp = mlxsw_sp_lower_get(dev);
1274    
1275     /* In case ports were pulled from out of a bridged LAG, then
1276     * it's possible the reference count isn't zero, yet the bridge
1277     @@ -2020,7 +2026,7 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
1278    
1279     vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
1280     mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1281     - if (WARN_ON(!mlxsw_sp_port_vlan))
1282     + if (!mlxsw_sp_port_vlan)
1283     return;
1284    
1285     mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1286     diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
1287     index f7ecceeb1e28..f27d67a4d304 100644
1288     --- a/drivers/net/ethernet/socionext/sni_ave.c
1289     +++ b/drivers/net/ethernet/socionext/sni_ave.c
1290     @@ -194,6 +194,7 @@
1291    
1292     /* Parameter for ethernet frame */
1293     #define AVE_MAX_ETHFRAME 1518
1294     +#define AVE_FRAME_HEADROOM 2
1295    
1296     /* Parameter for interrupt */
1297     #define AVE_INTM_COUNT 20
1298     @@ -585,12 +586,13 @@ static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
1299    
1300     skb = priv->rx.desc[entry].skbs;
1301     if (!skb) {
1302     - skb = netdev_alloc_skb_ip_align(ndev,
1303     - AVE_MAX_ETHFRAME);
1304     + skb = netdev_alloc_skb(ndev, AVE_MAX_ETHFRAME);
1305     if (!skb) {
1306     netdev_err(ndev, "can't allocate skb for Rx\n");
1307     return -ENOMEM;
1308     }
1309     + skb->data += AVE_FRAME_HEADROOM;
1310     + skb->tail += AVE_FRAME_HEADROOM;
1311     }
1312    
1313     /* set disable to cmdsts */
1314     @@ -603,12 +605,12 @@ static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
1315     * - Rx buffer begins with 2 byte headroom, and data will be put from
1316     * (buffer + 2).
1317     * To satisfy this, specify the address to put back the buffer
1318     - * pointer advanced by NET_IP_ALIGN by netdev_alloc_skb_ip_align(),
1319     - * and expand the map size by NET_IP_ALIGN.
1320     + * pointer advanced by AVE_FRAME_HEADROOM, and expand the map size
1321     + * by AVE_FRAME_HEADROOM.
1322     */
1323     ret = ave_dma_map(ndev, &priv->rx.desc[entry],
1324     - skb->data - NET_IP_ALIGN,
1325     - AVE_MAX_ETHFRAME + NET_IP_ALIGN,
1326     + skb->data - AVE_FRAME_HEADROOM,
1327     + AVE_MAX_ETHFRAME + AVE_FRAME_HEADROOM,
1328     DMA_FROM_DEVICE, &paddr);
1329     if (ret) {
1330     netdev_err(ndev, "can't map skb for Rx\n");
1331     diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1332     index 75896d6ba6e2..99ea5c4ce29c 100644
1333     --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1334     +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1335     @@ -2547,12 +2547,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1336     netdev_warn(priv->dev, "PTP init failed\n");
1337     }
1338    
1339     -#ifdef CONFIG_DEBUG_FS
1340     - ret = stmmac_init_fs(dev);
1341     - if (ret < 0)
1342     - netdev_warn(priv->dev, "%s: failed debugFS registration\n",
1343     - __func__);
1344     -#endif
1345     priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1346    
1347     if (priv->use_riwt) {
1348     @@ -2753,10 +2747,6 @@ static int stmmac_release(struct net_device *dev)
1349    
1350     netif_carrier_off(dev);
1351    
1352     -#ifdef CONFIG_DEBUG_FS
1353     - stmmac_exit_fs(dev);
1354     -#endif
1355     -
1356     stmmac_release_ptp(priv);
1357    
1358     return 0;
1359     @@ -3896,6 +3886,9 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
1360     u32 tx_count = priv->plat->tx_queues_to_use;
1361     u32 queue;
1362    
1363     + if ((dev->flags & IFF_UP) == 0)
1364     + return 0;
1365     +
1366     for (queue = 0; queue < rx_count; queue++) {
1367     struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1368    
1369     @@ -4394,6 +4387,13 @@ int stmmac_dvr_probe(struct device *device,
1370     goto error_netdev_register;
1371     }
1372    
1373     +#ifdef CONFIG_DEBUG_FS
1374     + ret = stmmac_init_fs(ndev);
1375     + if (ret < 0)
1376     + netdev_warn(priv->dev, "%s: failed debugFS registration\n",
1377     + __func__);
1378     +#endif
1379     +
1380     return ret;
1381    
1382     error_netdev_register:
1383     @@ -4429,6 +4429,9 @@ int stmmac_dvr_remove(struct device *dev)
1384    
1385     netdev_info(priv->dev, "%s: removing driver", __func__);
1386    
1387     +#ifdef CONFIG_DEBUG_FS
1388     + stmmac_exit_fs(ndev);
1389     +#endif
1390     stmmac_stop_all_dma(priv);
1391    
1392     stmmac_mac_set(priv, priv->ioaddr, false);
1393     diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
1394     index cfda146f3b3b..6372cdc4a510 100644
1395     --- a/drivers/net/macvlan.c
1396     +++ b/drivers/net/macvlan.c
1397     @@ -608,7 +608,7 @@ static int macvlan_open(struct net_device *dev)
1398     goto hash_add;
1399     }
1400    
1401     - err = -EBUSY;
1402     + err = -EADDRINUSE;
1403     if (macvlan_addr_busy(vlan->port, dev->dev_addr))
1404     goto out;
1405    
1406     @@ -706,7 +706,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
1407     } else {
1408     /* Rehash and update the device filters */
1409     if (macvlan_addr_busy(vlan->port, addr))
1410     - return -EBUSY;
1411     + return -EADDRINUSE;
1412    
1413     if (!macvlan_passthru(port)) {
1414     err = dev_uc_add(lowerdev, addr);
1415     @@ -747,6 +747,9 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
1416     return dev_set_mac_address(vlan->lowerdev, addr);
1417     }
1418    
1419     + if (macvlan_addr_busy(vlan->port, addr->sa_data))
1420     + return -EADDRINUSE;
1421     +
1422     return macvlan_sync_address(dev, addr->sa_data);
1423     }
1424    
1425     diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
1426     index 62ab42e94c9d..4ca6592f5b3a 100644
1427     --- a/drivers/net/wireless/mac80211_hwsim.c
1428     +++ b/drivers/net/wireless/mac80211_hwsim.c
1429     @@ -3712,16 +3712,16 @@ static int __init init_mac80211_hwsim(void)
1430     if (err)
1431     goto out_unregister_pernet;
1432    
1433     + err = hwsim_init_netlink();
1434     + if (err)
1435     + goto out_unregister_driver;
1436     +
1437     hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
1438     if (IS_ERR(hwsim_class)) {
1439     err = PTR_ERR(hwsim_class);
1440     - goto out_unregister_driver;
1441     + goto out_exit_netlink;
1442     }
1443    
1444     - err = hwsim_init_netlink();
1445     - if (err < 0)
1446     - goto out_unregister_driver;
1447     -
1448     for (i = 0; i < radios; i++) {
1449     struct hwsim_new_radio_params param = { 0 };
1450    
1451     @@ -3827,6 +3827,8 @@ out_free_mon:
1452     free_netdev(hwsim_mon);
1453     out_free_radios:
1454     mac80211_hwsim_free();
1455     +out_exit_netlink:
1456     + hwsim_exit_netlink();
1457     out_unregister_driver:
1458     platform_driver_unregister(&mac80211_hwsim_driver);
1459     out_unregister_pernet:
1460     diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
1461     index b7b2659e02fa..e5bddae16ed4 100644
1462     --- a/drivers/nvme/host/core.c
1463     +++ b/drivers/nvme/host/core.c
1464     @@ -831,6 +831,8 @@ static int nvme_submit_user_cmd(struct request_queue *q,
1465     static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
1466     {
1467     struct nvme_ctrl *ctrl = rq->end_io_data;
1468     + unsigned long flags;
1469     + bool startka = false;
1470    
1471     blk_mq_free_request(rq);
1472    
1473     @@ -841,7 +843,13 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
1474     return;
1475     }
1476    
1477     - schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
1478     + spin_lock_irqsave(&ctrl->lock, flags);
1479     + if (ctrl->state == NVME_CTRL_LIVE ||
1480     + ctrl->state == NVME_CTRL_CONNECTING)
1481     + startka = true;
1482     + spin_unlock_irqrestore(&ctrl->lock, flags);
1483     + if (startka)
1484     + schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
1485     }
1486    
1487     static int nvme_keep_alive(struct nvme_ctrl *ctrl)
1488     diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
1489     index bfc4da660bb4..e57f3902beb3 100644
1490     --- a/drivers/nvme/target/rdma.c
1491     +++ b/drivers/nvme/target/rdma.c
1492     @@ -529,6 +529,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
1493     {
1494     struct nvmet_rdma_rsp *rsp =
1495     container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
1496     + struct nvmet_rdma_queue *queue = cq->cq_context;
1497    
1498     nvmet_rdma_release_rsp(rsp);
1499    
1500     @@ -536,7 +537,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
1501     wc->status != IB_WC_WR_FLUSH_ERR)) {
1502     pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
1503     wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
1504     - nvmet_rdma_error_comp(rsp->queue);
1505     + nvmet_rdma_error_comp(queue);
1506     }
1507     }
1508    
1509     diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
1510     index 5c8ed7350a04..a36e4cf1841d 100644
1511     --- a/drivers/sbus/char/display7seg.c
1512     +++ b/drivers/sbus/char/display7seg.c
1513     @@ -220,6 +220,7 @@ static int d7s_probe(struct platform_device *op)
1514     dev_set_drvdata(&op->dev, p);
1515     d7s_device = p;
1516     err = 0;
1517     + of_node_put(opts);
1518    
1519     out:
1520     return err;
1521     diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
1522     index 56e962a01493..b8481927bfe4 100644
1523     --- a/drivers/sbus/char/envctrl.c
1524     +++ b/drivers/sbus/char/envctrl.c
1525     @@ -910,8 +910,10 @@ static void envctrl_init_i2c_child(struct device_node *dp,
1526     for (len = 0; len < PCF8584_MAX_CHANNELS; ++len) {
1527     pchild->mon_type[len] = ENVCTRL_NOMON;
1528     }
1529     + of_node_put(root_node);
1530     return;
1531     }
1532     + of_node_put(root_node);
1533     }
1534    
1535     /* Get the monitor channels. */
1536     diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
1537     index 93c66ebad907..f78d2e5c1471 100644
1538     --- a/drivers/scsi/libiscsi.c
1539     +++ b/drivers/scsi/libiscsi.c
1540     @@ -2416,8 +2416,8 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc)
1541     failed:
1542     ISCSI_DBG_EH(session,
1543     "failing session reset: Could not log back into "
1544     - "%s, %s [age %d]\n", session->targetname,
1545     - conn->persistent_address, session->age);
1546     + "%s [age %d]\n", session->targetname,
1547     + session->age);
1548     spin_unlock_bh(&session->frwd_lock);
1549     mutex_unlock(&session->eh_mutex);
1550     return FAILED;
1551     diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
1552     index 0cd947f78b5b..890b8aaf95e1 100644
1553     --- a/drivers/scsi/vmw_pvscsi.c
1554     +++ b/drivers/scsi/vmw_pvscsi.c
1555     @@ -1202,8 +1202,6 @@ static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
1556    
1557     static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
1558     {
1559     - pvscsi_shutdown_intr(adapter);
1560     -
1561     if (adapter->workqueue)
1562     destroy_workqueue(adapter->workqueue);
1563    
1564     @@ -1535,6 +1533,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1565     out_reset_adapter:
1566     ll_adapter_reset(adapter);
1567     out_release_resources:
1568     + pvscsi_shutdown_intr(adapter);
1569     pvscsi_release_resources(adapter);
1570     scsi_host_put(host);
1571     out_disable_device:
1572     @@ -1543,6 +1542,7 @@ out_disable_device:
1573     return error;
1574    
1575     out_release_resources_and_disable:
1576     + pvscsi_shutdown_intr(adapter);
1577     pvscsi_release_resources(adapter);
1578     goto out_disable_device;
1579     }
1580     diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
1581     index 2c2f6d93034e..e16b3cb1808c 100644
1582     --- a/drivers/thermal/armada_thermal.c
1583     +++ b/drivers/thermal/armada_thermal.c
1584     @@ -357,7 +357,7 @@ static int armada_get_temp_legacy(struct thermal_zone_device *thermal,
1585     int ret;
1586    
1587     /* Valid check */
1588     - if (armada_is_valid(priv)) {
1589     + if (!armada_is_valid(priv)) {
1590     dev_err(priv->dev,
1591     "Temperature sensor reading not valid\n");
1592     return -EIO;
1593     diff --git a/drivers/tty/serial/suncore.c b/drivers/tty/serial/suncore.c
1594     index 70a4ea4eaa6e..990376576970 100644
1595     --- a/drivers/tty/serial/suncore.c
1596     +++ b/drivers/tty/serial/suncore.c
1597     @@ -112,6 +112,7 @@ void sunserial_console_termios(struct console *con, struct device_node *uart_dp)
1598     mode = of_get_property(dp, mode_prop, NULL);
1599     if (!mode)
1600     mode = "9600,8,n,1,-";
1601     + of_node_put(dp);
1602     }
1603    
1604     cflag = CREAD | HUPCL | CLOCAL;
1605     diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
1606     index 51879ed18652..98ed5be132c6 100644
1607     --- a/drivers/vhost/vsock.c
1608     +++ b/drivers/vhost/vsock.c
1609     @@ -563,13 +563,21 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
1610     * executing.
1611     */
1612    
1613     - if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
1614     - sock_set_flag(sk, SOCK_DONE);
1615     - vsk->peer_shutdown = SHUTDOWN_MASK;
1616     - sk->sk_state = SS_UNCONNECTED;
1617     - sk->sk_err = ECONNRESET;
1618     - sk->sk_error_report(sk);
1619     - }
1620     + /* If the peer is still valid, no need to reset connection */
1621     + if (vhost_vsock_get(vsk->remote_addr.svm_cid))
1622     + return;
1623     +
1624     + /* If the close timeout is pending, let it expire. This avoids races
1625     + * with the timeout callback.
1626     + */
1627     + if (vsk->close_work_scheduled)
1628     + return;
1629     +
1630     + sock_set_flag(sk, SOCK_DONE);
1631     + vsk->peer_shutdown = SHUTDOWN_MASK;
1632     + sk->sk_state = SS_UNCONNECTED;
1633     + sk->sk_err = ECONNRESET;
1634     + sk->sk_error_report(sk);
1635     }
1636    
1637     static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
1638     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
1639     index 834a3f5ef642..d4a7f7ca4145 100644
1640     --- a/fs/btrfs/disk-io.c
1641     +++ b/fs/btrfs/disk-io.c
1642     @@ -1656,9 +1656,8 @@ static int cleaner_kthread(void *arg)
1643     struct btrfs_root *root = arg;
1644     struct btrfs_fs_info *fs_info = root->fs_info;
1645     int again;
1646     - struct btrfs_trans_handle *trans;
1647    
1648     - do {
1649     + while (1) {
1650     again = 0;
1651    
1652     /* Make the cleaner go to sleep early. */
1653     @@ -1707,42 +1706,16 @@ static int cleaner_kthread(void *arg)
1654     */
1655     btrfs_delete_unused_bgs(fs_info);
1656     sleep:
1657     + if (kthread_should_park())
1658     + kthread_parkme();
1659     + if (kthread_should_stop())
1660     + return 0;
1661     if (!again) {
1662     set_current_state(TASK_INTERRUPTIBLE);
1663     - if (!kthread_should_stop())
1664     - schedule();
1665     + schedule();
1666     __set_current_state(TASK_RUNNING);
1667     }
1668     - } while (!kthread_should_stop());
1669     -
1670     - /*
1671     - * Transaction kthread is stopped before us and wakes us up.
1672     - * However we might have started a new transaction and COWed some
1673     - * tree blocks when deleting unused block groups for example. So
1674     - * make sure we commit the transaction we started to have a clean
1675     - * shutdown when evicting the btree inode - if it has dirty pages
1676     - * when we do the final iput() on it, eviction will trigger a
1677     - * writeback for it which will fail with null pointer dereferences
1678     - * since work queues and other resources were already released and
1679     - * destroyed by the time the iput/eviction/writeback is made.
1680     - */
1681     - trans = btrfs_attach_transaction(root);
1682     - if (IS_ERR(trans)) {
1683     - if (PTR_ERR(trans) != -ENOENT)
1684     - btrfs_err(fs_info,
1685     - "cleaner transaction attach returned %ld",
1686     - PTR_ERR(trans));
1687     - } else {
1688     - int ret;
1689     -
1690     - ret = btrfs_commit_transaction(trans);
1691     - if (ret)
1692     - btrfs_err(fs_info,
1693     - "cleaner open transaction commit returned %d",
1694     - ret);
1695     }
1696     -
1697     - return 0;
1698     }
1699    
1700     static int transaction_kthread(void *arg)
1701     @@ -3923,6 +3896,13 @@ void close_ctree(struct btrfs_fs_info *fs_info)
1702     int ret;
1703    
1704     set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
1705     + /*
1706     + * We don't want the cleaner to start new transactions, add more delayed
1707     + * iputs, etc. while we're closing. We can't use kthread_stop() yet
1708     + * because that frees the task_struct, and the transaction kthread might
1709     + * still try to wake up the cleaner.
1710     + */
1711     + kthread_park(fs_info->cleaner_kthread);
1712    
1713     /* wait for the qgroup rescan worker to stop */
1714     btrfs_qgroup_wait_for_completion(fs_info, false);
1715     @@ -3950,9 +3930,8 @@ void close_ctree(struct btrfs_fs_info *fs_info)
1716    
1717     if (!sb_rdonly(fs_info->sb)) {
1718     /*
1719     - * If the cleaner thread is stopped and there are
1720     - * block groups queued for removal, the deletion will be
1721     - * skipped when we quit the cleaner thread.
1722     + * The cleaner kthread is stopped, so do one final pass over
1723     + * unused block groups.
1724     */
1725     btrfs_delete_unused_bgs(fs_info);
1726    
1727     diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
1728     index abcd78e332fe..85dadb93c992 100644
1729     --- a/fs/cifs/Kconfig
1730     +++ b/fs/cifs/Kconfig
1731     @@ -133,7 +133,7 @@ config CIFS_XATTR
1732    
1733     config CIFS_POSIX
1734     bool "CIFS POSIX Extensions"
1735     - depends on CIFS_XATTR
1736     + depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY && CIFS_XATTR
1737     help
1738     Enabling this option will cause the cifs client to attempt to
1739     negotiate a newer dialect with servers, such as Samba 3.0.5
1740     diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
1741     index aa12c3063bae..33824a0a57bf 100644
1742     --- a/fs/nfs/direct.c
1743     +++ b/fs/nfs/direct.c
1744     @@ -98,8 +98,11 @@ struct nfs_direct_req {
1745     struct pnfs_ds_commit_info ds_cinfo; /* Storage for cinfo */
1746     struct work_struct work;
1747     int flags;
1748     + /* for write */
1749     #define NFS_ODIRECT_DO_COMMIT (1) /* an unstable reply was received */
1750     #define NFS_ODIRECT_RESCHED_WRITES (2) /* write verification failed */
1751     + /* for read */
1752     +#define NFS_ODIRECT_SHOULD_DIRTY (3) /* dirty user-space page after read */
1753     struct nfs_writeverf verf; /* unstable write verifier */
1754     };
1755    
1756     @@ -412,7 +415,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
1757     struct nfs_page *req = nfs_list_entry(hdr->pages.next);
1758     struct page *page = req->wb_page;
1759    
1760     - if (!PageCompound(page) && bytes < hdr->good_bytes)
1761     + if (!PageCompound(page) && bytes < hdr->good_bytes &&
1762     + (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
1763     set_page_dirty(page);
1764     bytes += req->wb_bytes;
1765     nfs_list_remove_request(req);
1766     @@ -587,6 +591,9 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
1767     if (!is_sync_kiocb(iocb))
1768     dreq->iocb = iocb;
1769    
1770     + if (iter_is_iovec(iter))
1771     + dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
1772     +
1773     nfs_start_io_direct(inode);
1774    
1775     NFS_I(inode)->read_io += count;
1776     diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
1777     index 56acfbb80104..2954e4b3abd5 100644
1778     --- a/kernel/bpf/verifier.c
1779     +++ b/kernel/bpf/verifier.c
1780     @@ -4792,6 +4792,9 @@ static int do_check(struct bpf_verifier_env *env)
1781     goto process_bpf_exit;
1782     }
1783    
1784     + if (signal_pending(current))
1785     + return -EAGAIN;
1786     +
1787     if (need_resched())
1788     cond_resched();
1789    
1790     diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
1791     index bfaeb05123ff..edd75e0c7d96 100644
1792     --- a/kernel/locking/qspinlock.c
1793     +++ b/kernel/locking/qspinlock.c
1794     @@ -231,6 +231,20 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
1795     }
1796     #endif /* _Q_PENDING_BITS == 8 */
1797    
1798     +/**
1799     + * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
1800     + * @lock : Pointer to queued spinlock structure
1801     + * Return: The previous lock value
1802     + *
1803     + * *,*,* -> *,1,*
1804     + */
1805     +#ifndef queued_fetch_set_pending_acquire
1806     +static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
1807     +{
1808     + return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
1809     +}
1810     +#endif
1811     +
1812     /**
1813     * set_locked - Set the lock bit and own the lock
1814     * @lock: Pointer to queued spinlock structure
1815     @@ -329,40 +343,39 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
1816     * 0,0,0 -> 0,0,1 ; trylock
1817     * 0,0,1 -> 0,1,1 ; pending
1818     */
1819     - val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
1820     - if (!(val & ~_Q_LOCKED_MASK)) {
1821     - /*
1822     - * We're pending, wait for the owner to go away.
1823     - *
1824     - * *,1,1 -> *,1,0
1825     - *
1826     - * this wait loop must be a load-acquire such that we match the
1827     - * store-release that clears the locked bit and create lock
1828     - * sequentiality; this is because not all
1829     - * clear_pending_set_locked() implementations imply full
1830     - * barriers.
1831     - */
1832     - if (val & _Q_LOCKED_MASK) {
1833     - atomic_cond_read_acquire(&lock->val,
1834     - !(VAL & _Q_LOCKED_MASK));
1835     - }
1836     + val = queued_fetch_set_pending_acquire(lock);
1837    
1838     - /*
1839     - * take ownership and clear the pending bit.
1840     - *
1841     - * *,1,0 -> *,0,1
1842     - */
1843     - clear_pending_set_locked(lock);
1844     - qstat_inc(qstat_lock_pending, true);
1845     - return;
1846     + /*
1847     + * If we observe any contention; undo and queue.
1848     + */
1849     + if (unlikely(val & ~_Q_LOCKED_MASK)) {
1850     + if (!(val & _Q_PENDING_MASK))
1851     + clear_pending(lock);
1852     + goto queue;
1853     }
1854    
1855     /*
1856     - * If pending was clear but there are waiters in the queue, then
1857     - * we need to undo our setting of pending before we queue ourselves.
1858     + * We're pending, wait for the owner to go away.
1859     + *
1860     + * 0,1,1 -> 0,1,0
1861     + *
1862     + * this wait loop must be a load-acquire such that we match the
1863     + * store-release that clears the locked bit and create lock
1864     + * sequentiality; this is because not all
1865     + * clear_pending_set_locked() implementations imply full
1866     + * barriers.
1867     + */
1868     + if (val & _Q_LOCKED_MASK)
1869     + atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK));
1870     +
1871     + /*
1872     + * take ownership and clear the pending bit.
1873     + *
1874     + * 0,1,0 -> 0,0,1
1875     */
1876     - if (!(val & _Q_PENDING_MASK))
1877     - clear_pending(lock);
1878     + clear_pending_set_locked(lock);
1879     + qstat_inc(qstat_lock_pending, true);
1880     + return;
1881    
1882     /*
1883     * End of pending bit optimistic spinning and beginning of MCS
1884     diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
1885     index a8db2e3f8904..d066aae3cb6d 100644
1886     --- a/net/sunrpc/xprt.c
1887     +++ b/net/sunrpc/xprt.c
1888     @@ -781,8 +781,15 @@ void xprt_connect(struct rpc_task *task)
1889     return;
1890     if (xprt_test_and_set_connecting(xprt))
1891     return;
1892     - xprt->stat.connect_start = jiffies;
1893     - xprt->ops->connect(xprt, task);
1894     + /* Race breaker */
1895     + if (!xprt_connected(xprt)) {
1896     + xprt->stat.connect_start = jiffies;
1897     + xprt->ops->connect(xprt, task);
1898     + } else {
1899     + xprt_clear_connecting(xprt);
1900     + task->tk_status = 0;
1901     + rpc_wake_up_queued_task(&xprt->pending, task);
1902     + }
1903     }
1904     xprt_release_write(xprt, task);
1905     }
1906     diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
1907     index ec50d2a95076..fa763dbfdad7 100644
1908     --- a/tools/testing/nvdimm/test/nfit.c
1909     +++ b/tools/testing/nvdimm/test/nfit.c
1910     @@ -15,6 +15,7 @@
1911     #include <linux/dma-mapping.h>
1912     #include <linux/workqueue.h>
1913     #include <linux/libnvdimm.h>
1914     +#include <linux/genalloc.h>
1915     #include <linux/vmalloc.h>
1916     #include <linux/device.h>
1917     #include <linux/module.h>
1918     @@ -213,6 +214,8 @@ struct nfit_test {
1919    
1920     static struct workqueue_struct *nfit_wq;
1921    
1922     +static struct gen_pool *nfit_pool;
1923     +
1924     static struct nfit_test *to_nfit_test(struct device *dev)
1925     {
1926     struct platform_device *pdev = to_platform_device(dev);
1927     @@ -1130,6 +1133,9 @@ static void release_nfit_res(void *data)
1928     list_del(&nfit_res->list);
1929     spin_unlock(&nfit_test_lock);
1930    
1931     + if (resource_size(&nfit_res->res) >= DIMM_SIZE)
1932     + gen_pool_free(nfit_pool, nfit_res->res.start,
1933     + resource_size(&nfit_res->res));
1934     vfree(nfit_res->buf);
1935     kfree(nfit_res);
1936     }
1937     @@ -1142,7 +1148,7 @@ static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
1938     GFP_KERNEL);
1939     int rc;
1940    
1941     - if (!buf || !nfit_res)
1942     + if (!buf || !nfit_res || !*dma)
1943     goto err;
1944     rc = devm_add_action(dev, release_nfit_res, nfit_res);
1945     if (rc)
1946     @@ -1162,6 +1168,8 @@ static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
1947    
1948     return nfit_res->buf;
1949     err:
1950     + if (*dma && size >= DIMM_SIZE)
1951     + gen_pool_free(nfit_pool, *dma, size);
1952     if (buf)
1953     vfree(buf);
1954     kfree(nfit_res);
1955     @@ -1170,9 +1178,16 @@ static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
1956    
1957     static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
1958     {
1959     + struct genpool_data_align data = {
1960     + .align = SZ_128M,
1961     + };
1962     void *buf = vmalloc(size);
1963    
1964     - *dma = (unsigned long) buf;
1965     + if (size >= DIMM_SIZE)
1966     + *dma = gen_pool_alloc_algo(nfit_pool, size,
1967     + gen_pool_first_fit_align, &data);
1968     + else
1969     + *dma = (unsigned long) buf;
1970     return __test_alloc(t, size, dma, buf);
1971     }
1972    
1973     @@ -2837,6 +2852,17 @@ static __init int nfit_test_init(void)
1974     goto err_register;
1975     }
1976    
1977     + nfit_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
1978     + if (!nfit_pool) {
1979     + rc = -ENOMEM;
1980     + goto err_register;
1981     + }
1982     +
1983     + if (gen_pool_add(nfit_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
1984     + rc = -ENOMEM;
1985     + goto err_register;
1986     + }
1987     +
1988     for (i = 0; i < NUM_NFITS; i++) {
1989     struct nfit_test *nfit_test;
1990     struct platform_device *pdev;
1991     @@ -2892,6 +2918,9 @@ static __init int nfit_test_init(void)
1992     return 0;
1993    
1994     err_register:
1995     + if (nfit_pool)
1996     + gen_pool_destroy(nfit_pool);
1997     +
1998     destroy_workqueue(nfit_wq);
1999     for (i = 0; i < NUM_NFITS; i++)
2000     if (instances[i])
2001     @@ -2915,6 +2944,8 @@ static __exit void nfit_test_exit(void)
2002     platform_driver_unregister(&nfit_test_driver);
2003     nfit_test_teardown();
2004    
2005     + gen_pool_destroy(nfit_pool);
2006     +
2007     for (i = 0; i < NUM_NFITS; i++)
2008     put_device(&instances[i]->pdev.dev);
2009     class_destroy(nfit_test_dimm);
2010     diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
2011     index 6b5cfeb7a9cc..29116366a9fc 100644
2012     --- a/tools/testing/selftests/bpf/test_btf.c
2013     +++ b/tools/testing/selftests/bpf/test_btf.c
2014     @@ -431,11 +431,11 @@ static struct btf_raw_test raw_tests[] = {
2015     /* const void* */ /* [3] */
2016     BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
2017     /* typedef const void * const_void_ptr */
2018     - BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3),
2019     - /* struct A { */ /* [4] */
2020     + BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [4] */
2021     + /* struct A { */ /* [5] */
2022     BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), sizeof(void *)),
2023     /* const_void_ptr m; */
2024     - BTF_MEMBER_ENC(NAME_TBD, 3, 0),
2025     + BTF_MEMBER_ENC(NAME_TBD, 4, 0),
2026     /* } */
2027     BTF_END_RAW,
2028     },
2029     @@ -493,10 +493,10 @@ static struct btf_raw_test raw_tests[] = {
2030     BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0),
2031     /* const void* */ /* [3] */
2032     BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
2033     - /* typedef const void * const_void_ptr */ /* [4] */
2034     - BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3),
2035     - /* const_void_ptr[4] */ /* [5] */
2036     - BTF_TYPE_ARRAY_ENC(3, 1, 4),
2037     + /* typedef const void * const_void_ptr */
2038     + BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [4] */
2039     + /* const_void_ptr[4] */
2040     + BTF_TYPE_ARRAY_ENC(4, 1, 4), /* [5] */
2041     BTF_END_RAW,
2042     },
2043     .str_sec = "\0const_void_ptr",
2044     @@ -1291,6 +1291,367 @@ static struct btf_raw_test raw_tests[] = {
2045     .err_str = "type != 0",
2046     },
2047    
2048     +{
2049     + .descr = "typedef (invalid name, name_off = 0)",
2050     + .raw_types = {
2051     + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2052     + BTF_TYPEDEF_ENC(0, 1), /* [2] */
2053     + BTF_END_RAW,
2054     + },
2055     + .str_sec = "\0__int",
2056     + .str_sec_size = sizeof("\0__int"),
2057     + .map_type = BPF_MAP_TYPE_ARRAY,
2058     + .map_name = "typedef_check_btf",
2059     + .key_size = sizeof(int),
2060     + .value_size = sizeof(int),
2061     + .key_type_id = 1,
2062     + .value_type_id = 1,
2063     + .max_entries = 4,
2064     + .btf_load_err = true,
2065     + .err_str = "Invalid name",
2066     +},
2067     +
2068     +{
2069     + .descr = "typedef (invalid name, invalid identifier)",
2070     + .raw_types = {
2071     + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2072     + BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [2] */
2073     + BTF_END_RAW,
2074     + },
2075     + .str_sec = "\0__!int",
2076     + .str_sec_size = sizeof("\0__!int"),
2077     + .map_type = BPF_MAP_TYPE_ARRAY,
2078     + .map_name = "typedef_check_btf",
2079     + .key_size = sizeof(int),
2080     + .value_size = sizeof(int),
2081     + .key_type_id = 1,
2082     + .value_type_id = 1,
2083     + .max_entries = 4,
2084     + .btf_load_err = true,
2085     + .err_str = "Invalid name",
2086     +},
2087     +
2088     +{
2089     + .descr = "ptr type (invalid name, name_off <> 0)",
2090     + .raw_types = {
2091     + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2092     + BTF_TYPE_ENC(NAME_TBD,
2093     + BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1), /* [2] */
2094     + BTF_END_RAW,
2095     + },
2096     + .str_sec = "\0__int",
2097     + .str_sec_size = sizeof("\0__int"),
2098     + .map_type = BPF_MAP_TYPE_ARRAY,
2099     + .map_name = "ptr_type_check_btf",
2100     + .key_size = sizeof(int),
2101     + .value_size = sizeof(int),
2102     + .key_type_id = 1,
2103     + .value_type_id = 1,
2104     + .max_entries = 4,
2105     + .btf_load_err = true,
2106     + .err_str = "Invalid name",
2107     +},
2108     +
2109     +{
2110     + .descr = "volatile type (invalid name, name_off <> 0)",
2111     + .raw_types = {
2112     + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2113     + BTF_TYPE_ENC(NAME_TBD,
2114     + BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 1), /* [2] */
2115     + BTF_END_RAW,
2116     + },
2117     + .str_sec = "\0__int",
2118     + .str_sec_size = sizeof("\0__int"),
2119     + .map_type = BPF_MAP_TYPE_ARRAY,
2120     + .map_name = "volatile_type_check_btf",
2121     + .key_size = sizeof(int),
2122     + .value_size = sizeof(int),
2123     + .key_type_id = 1,
2124     + .value_type_id = 1,
2125     + .max_entries = 4,
2126     + .btf_load_err = true,
2127     + .err_str = "Invalid name",
2128     +},
2129     +
2130     +{
2131     + .descr = "const type (invalid name, name_off <> 0)",
2132     + .raw_types = {
2133     + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2134     + BTF_TYPE_ENC(NAME_TBD,
2135     + BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 1), /* [2] */
2136     + BTF_END_RAW,
2137     + },
2138     + .str_sec = "\0__int",
2139     + .str_sec_size = sizeof("\0__int"),
2140     + .map_type = BPF_MAP_TYPE_ARRAY,
2141     + .map_name = "const_type_check_btf",
2142     + .key_size = sizeof(int),
2143     + .value_size = sizeof(int),
2144     + .key_type_id = 1,
2145     + .value_type_id = 1,
2146     + .max_entries = 4,
2147     + .btf_load_err = true,
2148     + .err_str = "Invalid name",
2149     +},
2150     +
2151     +{
2152     + .descr = "restrict type (invalid name, name_off <> 0)",
2153     + .raw_types = {
2154     + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2155     + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1), /* [2] */
2156     + BTF_TYPE_ENC(NAME_TBD,
2157     + BTF_INFO_ENC(BTF_KIND_RESTRICT, 0, 0), 2), /* [3] */
2158     + BTF_END_RAW,
2159     + },
2160     + .str_sec = "\0__int",
2161     + .str_sec_size = sizeof("\0__int"),
2162     + .map_type = BPF_MAP_TYPE_ARRAY,
2163     + .map_name = "restrict_type_check_btf",
2164     + .key_size = sizeof(int),
2165     + .value_size = sizeof(int),
2166     + .key_type_id = 1,
2167     + .value_type_id = 1,
2168     + .max_entries = 4,
2169     + .btf_load_err = true,
2170     + .err_str = "Invalid name",
2171     +},
2172     +
2173     +{
2174     + .descr = "fwd type (invalid name, name_off = 0)",
2175     + .raw_types = {
2176     + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2177     + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0), /* [2] */
2178     + BTF_END_RAW,
2179     + },
2180     + .str_sec = "\0__skb",
2181     + .str_sec_size = sizeof("\0__skb"),
2182     + .map_type = BPF_MAP_TYPE_ARRAY,
2183     + .map_name = "fwd_type_check_btf",
2184     + .key_size = sizeof(int),
2185     + .value_size = sizeof(int),
2186     + .key_type_id = 1,
2187     + .value_type_id = 1,
2188     + .max_entries = 4,
2189     + .btf_load_err = true,
2190     + .err_str = "Invalid name",
2191     +},
2192     +
2193     +{
2194     + .descr = "fwd type (invalid name, invalid identifier)",
2195     + .raw_types = {
2196     + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2197     + BTF_TYPE_ENC(NAME_TBD,
2198     + BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0), /* [2] */
2199     + BTF_END_RAW,
2200     + },
2201     + .str_sec = "\0__!skb",
2202     + .str_sec_size = sizeof("\0__!skb"),
2203     + .map_type = BPF_MAP_TYPE_ARRAY,
2204     + .map_name = "fwd_type_check_btf",
2205     + .key_size = sizeof(int),
2206     + .value_size = sizeof(int),
2207     + .key_type_id = 1,
2208     + .value_type_id = 1,
2209     + .max_entries = 4,
2210     + .btf_load_err = true,
2211     + .err_str = "Invalid name",
2212     +},
2213     +
2214     +{
2215     + .descr = "array type (invalid name, name_off <> 0)",
2216     + .raw_types = {
2217     + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2218     + BTF_TYPE_ENC(NAME_TBD,
2219     + BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0), /* [2] */
2220     + BTF_ARRAY_ENC(1, 1, 4),
2221     + BTF_END_RAW,
2222     + },
2223     + .str_sec = "\0__skb",
2224     + .str_sec_size = sizeof("\0__skb"),
2225     + .map_type = BPF_MAP_TYPE_ARRAY,
2226     + .map_name = "array_type_check_btf",
2227     + .key_size = sizeof(int),
2228     + .value_size = sizeof(int),
2229     + .key_type_id = 1,
2230     + .value_type_id = 1,
2231     + .max_entries = 4,
2232     + .btf_load_err = true,
2233     + .err_str = "Invalid name",
2234     +},
2235     +
2236     +{
2237     + .descr = "struct type (name_off = 0)",
2238     + .raw_types = {
2239     + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2240     + BTF_TYPE_ENC(0,
2241     + BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */
2242     + BTF_MEMBER_ENC(NAME_TBD, 1, 0),
2243     + BTF_END_RAW,
2244     + },
2245     + .str_sec = "\0A",
2246     + .str_sec_size = sizeof("\0A"),
2247     + .map_type = BPF_MAP_TYPE_ARRAY,
2248     + .map_name = "struct_type_check_btf",
2249     + .key_size = sizeof(int),
2250     + .value_size = sizeof(int),
2251     + .key_type_id = 1,
2252     + .value_type_id = 1,
2253     + .max_entries = 4,
2254     +},
2255     +
2256     +{
2257     + .descr = "struct type (invalid name, invalid identifier)",
2258     + .raw_types = {
2259     + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2260     + BTF_TYPE_ENC(NAME_TBD,
2261     + BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */
2262     + BTF_MEMBER_ENC(NAME_TBD, 1, 0),
2263     + BTF_END_RAW,
2264     + },
2265     + .str_sec = "\0A!\0B",
2266     + .str_sec_size = sizeof("\0A!\0B"),
2267     + .map_type = BPF_MAP_TYPE_ARRAY,
2268     + .map_name = "struct_type_check_btf",
2269     + .key_size = sizeof(int),
2270     + .value_size = sizeof(int),
2271     + .key_type_id = 1,
2272     + .value_type_id = 1,
2273     + .max_entries = 4,
2274     + .btf_load_err = true,
2275     + .err_str = "Invalid name",
2276     +},
2277     +
2278     +{
2279     + .descr = "struct member (name_off = 0)",
2280     + .raw_types = {
2281     + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2282     + BTF_TYPE_ENC(0,
2283     + BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */
2284     + BTF_MEMBER_ENC(NAME_TBD, 1, 0),
2285     + BTF_END_RAW,
2286     + },
2287     + .str_sec = "\0A",
2288     + .str_sec_size = sizeof("\0A"),
2289     + .map_type = BPF_MAP_TYPE_ARRAY,
2290     + .map_name = "struct_type_check_btf",
2291     + .key_size = sizeof(int),
2292     + .value_size = sizeof(int),
2293     + .key_type_id = 1,
2294     + .value_type_id = 1,
2295     + .max_entries = 4,
2296     +},
2297     +
2298     +{
2299     + .descr = "struct member (invalid name, invalid identifier)",
2300     + .raw_types = {
2301     + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2302     + BTF_TYPE_ENC(NAME_TBD,
2303     + BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */
2304     + BTF_MEMBER_ENC(NAME_TBD, 1, 0),
2305     + BTF_END_RAW,
2306     + },
2307     + .str_sec = "\0A\0B*",
2308     + .str_sec_size = sizeof("\0A\0B*"),
2309     + .map_type = BPF_MAP_TYPE_ARRAY,
2310     + .map_name = "struct_type_check_btf",
2311     + .key_size = sizeof(int),
2312     + .value_size = sizeof(int),
2313     + .key_type_id = 1,
2314     + .value_type_id = 1,
2315     + .max_entries = 4,
2316     + .btf_load_err = true,
2317     + .err_str = "Invalid name",
2318     +},
2319     +
2320     +{
2321     + .descr = "enum type (name_off = 0)",
2322     + .raw_types = {
2323     + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2324     + BTF_TYPE_ENC(0,
2325     + BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
2326     + sizeof(int)), /* [2] */
2327     + BTF_ENUM_ENC(NAME_TBD, 0),
2328     + BTF_END_RAW,
2329     + },
2330     + .str_sec = "\0A\0B",
2331     + .str_sec_size = sizeof("\0A\0B"),
2332     + .map_type = BPF_MAP_TYPE_ARRAY,
2333     + .map_name = "enum_type_check_btf",
2334     + .key_size = sizeof(int),
2335     + .value_size = sizeof(int),
2336     + .key_type_id = 1,
2337     + .value_type_id = 1,
2338     + .max_entries = 4,
2339     +},
2340     +
2341     +{
2342     + .descr = "enum type (invalid name, invalid identifier)",
2343     + .raw_types = {
2344     + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2345     + BTF_TYPE_ENC(NAME_TBD,
2346     + BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
2347     + sizeof(int)), /* [2] */
2348     + BTF_ENUM_ENC(NAME_TBD, 0),
2349     + BTF_END_RAW,
2350     + },
2351     + .str_sec = "\0A!\0B",
2352     + .str_sec_size = sizeof("\0A!\0B"),
2353     + .map_type = BPF_MAP_TYPE_ARRAY,
2354     + .map_name = "enum_type_check_btf",
2355     + .key_size = sizeof(int),
2356     + .value_size = sizeof(int),
2357     + .key_type_id = 1,
2358     + .value_type_id = 1,
2359     + .max_entries = 4,
2360     + .btf_load_err = true,
2361     + .err_str = "Invalid name",
2362     +},
2363     +
2364     +{
2365     + .descr = "enum member (invalid name, name_off = 0)",
2366     + .raw_types = {
2367     + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2368     + BTF_TYPE_ENC(0,
2369     + BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
2370     + sizeof(int)), /* [2] */
2371     + BTF_ENUM_ENC(0, 0),
2372     + BTF_END_RAW,
2373     + },
2374     + .str_sec = "",
2375     + .str_sec_size = sizeof(""),
2376     + .map_type = BPF_MAP_TYPE_ARRAY,
2377     + .map_name = "enum_type_check_btf",
2378     + .key_size = sizeof(int),
2379     + .value_size = sizeof(int),
2380     + .key_type_id = 1,
2381     + .value_type_id = 1,
2382     + .max_entries = 4,
2383     + .btf_load_err = true,
2384     + .err_str = "Invalid name",
2385     +},
2386     +
2387     +{
2388     + .descr = "enum member (invalid name, invalid identifier)",
2389     + .raw_types = {
2390     + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
2391     + BTF_TYPE_ENC(0,
2392     + BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
2393     + sizeof(int)), /* [2] */
2394     + BTF_ENUM_ENC(NAME_TBD, 0),
2395     + BTF_END_RAW,
2396     + },
2397     + .str_sec = "\0A!",
2398     + .str_sec_size = sizeof("\0A!"),
2399     + .map_type = BPF_MAP_TYPE_ARRAY,
2400     + .map_name = "enum_type_check_btf",
2401     + .key_size = sizeof(int),
2402     + .value_size = sizeof(int),
2403     + .key_type_id = 1,
2404     + .value_type_id = 1,
2405     + .max_entries = 4,
2406     + .btf_load_err = true,
2407     + .err_str = "Invalid name",
2408     +},
2409     {
2410     .descr = "arraymap invalid btf key (a bit field)",
2411     .raw_types = {
2412     diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
2413     index 2bde9ee04db7..e436b67f2426 100644
2414     --- a/tools/testing/selftests/bpf/test_verifier.c
2415     +++ b/tools/testing/selftests/bpf/test_verifier.c
2416     @@ -12765,7 +12765,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
2417    
2418     reject_from_alignment = fd_prog < 0 &&
2419     (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
2420     - strstr(bpf_vlog, "Unknown alignment.");
2421     + strstr(bpf_vlog, "misaligned");
2422     #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2423     if (reject_from_alignment) {
2424     printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",