Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0246-4.9.147-all-fixes.patch



Revision 3299
Tue Mar 12 10:43:08 2019 UTC by niro
File size: 70332 bytes
-linux-4.9.147
1 niro 3299 diff --git a/Makefile b/Makefile
2     index 0a150d2b3353..3cccc51a57ce 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 146
9     +SUBLEVEL = 147
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
14     index c22b181e8206..2f39d9b3886e 100644
15     --- a/arch/arc/include/asm/io.h
16     +++ b/arch/arc/include/asm/io.h
17     @@ -12,6 +12,7 @@
18     #include <linux/types.h>
19     #include <asm/byteorder.h>
20     #include <asm/page.h>
21     +#include <asm/unaligned.h>
22    
23     #ifdef CONFIG_ISA_ARCV2
24     #include <asm/barrier.h>
25     @@ -94,6 +95,42 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
26     return w;
27     }
28    
29     +/*
30     + * {read,write}s{b,w,l}() repeatedly access the same IO address in
31     + * native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
32     + * @count times
33     + */
34     +#define __raw_readsx(t,f) \
35     +static inline void __raw_reads##f(const volatile void __iomem *addr, \
36     + void *ptr, unsigned int count) \
37     +{ \
38     + bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
39     + u##t *buf = ptr; \
40     + \
41     + if (!count) \
42     + return; \
43     + \
44     + /* Some ARC CPU's don't support unaligned accesses */ \
45     + if (is_aligned) { \
46     + do { \
47     + u##t x = __raw_read##f(addr); \
48     + *buf++ = x; \
49     + } while (--count); \
50     + } else { \
51     + do { \
52     + u##t x = __raw_read##f(addr); \
53     + put_unaligned(x, buf++); \
54     + } while (--count); \
55     + } \
56     +}
57     +
58     +#define __raw_readsb __raw_readsb
59     +__raw_readsx(8, b)
60     +#define __raw_readsw __raw_readsw
61     +__raw_readsx(16, w)
62     +#define __raw_readsl __raw_readsl
63     +__raw_readsx(32, l)
64     +
65     #define __raw_writeb __raw_writeb
66     static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
67     {
68     @@ -126,6 +163,35 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
69    
70     }
71    
72     +#define __raw_writesx(t,f) \
73     +static inline void __raw_writes##f(volatile void __iomem *addr, \
74     + const void *ptr, unsigned int count) \
75     +{ \
76     + bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
77     + const u##t *buf = ptr; \
78     + \
79     + if (!count) \
80     + return; \
81     + \
82     + /* Some ARC CPU's don't support unaligned accesses */ \
83     + if (is_aligned) { \
84     + do { \
85     + __raw_write##f(*buf++, addr); \
86     + } while (--count); \
87     + } else { \
88     + do { \
89     + __raw_write##f(get_unaligned(buf++), addr); \
90     + } while (--count); \
91     + } \
92     +}
93     +
94     +#define __raw_writesb __raw_writesb
95     +__raw_writesx(8, b)
96     +#define __raw_writesw __raw_writesw
97     +__raw_writesx(16, w)
98     +#define __raw_writesl __raw_writesl
99     +__raw_writesx(32, l)
100     +
101     /*
102     * MMIO can also get buffered/optimized in micro-arch, so barriers needed
103     * Based on ARM model for the typical use case
104     @@ -141,10 +207,16 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
105     #define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
106     #define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
107     #define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
108     +#define readsb(p,d,l) ({ __raw_readsb(p,d,l); __iormb(); })
109     +#define readsw(p,d,l) ({ __raw_readsw(p,d,l); __iormb(); })
110     +#define readsl(p,d,l) ({ __raw_readsl(p,d,l); __iormb(); })
111    
112     #define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); })
113     #define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
114     #define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
115     +#define writesb(p,d,l) ({ __iowmb(); __raw_writesb(p,d,l); })
116     +#define writesw(p,d,l) ({ __iowmb(); __raw_writesw(p,d,l); })
117     +#define writesl(p,d,l) ({ __iowmb(); __raw_writesl(p,d,l); })
118    
119     /*
120     * Relaxed API for drivers which can handle barrier ordering themselves
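
The ARC hunk above adds string-IO helpers: each __raw_reads{b,w,l}() reads the same MMIO register @count times into a memory buffer. Because some ARC cores fault on unaligned loads and stores, the buffer's alignment is tested once, outside the loop, and the unaligned path goes through put_unaligned()/get_unaligned(). A minimal userspace sketch of the read side of that pattern (reg_read32() and reads32() are hypothetical stand-ins for __raw_readl() and __raw_readsl(); memcpy stands in for put_unaligned()):

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical MMIO register read, standing in for __raw_readl(). */
    static inline uint32_t reg_read32(const volatile uint32_t *addr)
    {
            return *addr;
    }

    /* Read 'count' words from one register into 'ptr', which may be
     * unaligned.  As in __raw_readsx() above, alignment is decided
     * once, so the hot loop carries no per-iteration branch. */
    static void reads32(const volatile uint32_t *addr, void *ptr,
                        unsigned int count)
    {
            int aligned = ((uintptr_t)ptr % sizeof(uint32_t)) == 0;

            if (!count)
                    return;
            if (aligned) {
                    uint32_t *buf = ptr;
                    do {
                            *buf++ = reg_read32(addr);
                    } while (--count);
            } else {
                    unsigned char *buf = ptr;
                    do {
                            uint32_t x = reg_read32(addr);
                            /* byte-wise copy, like put_unaligned() */
                            memcpy(buf, &x, sizeof(x));
                            buf += sizeof(x);
                    } while (--count);
            }
    }
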
121     diff --git a/arch/arm/mach-mmp/cputype.h b/arch/arm/mach-mmp/cputype.h
122     index 8a3b56dfd35d..8f94addd9bce 100644
123     --- a/arch/arm/mach-mmp/cputype.h
124     +++ b/arch/arm/mach-mmp/cputype.h
125     @@ -43,10 +43,12 @@ static inline int cpu_is_pxa910(void)
126     #define cpu_is_pxa910() (0)
127     #endif
128    
129     -#ifdef CONFIG_CPU_MMP2
130     +#if defined(CONFIG_CPU_MMP2) || defined(CONFIG_MACH_MMP2_DT)
131     static inline int cpu_is_mmp2(void)
132     {
133     - return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
134     + return (((read_cpuid_id() >> 8) & 0xff) == 0x58) &&
135     + (((mmp_chip_id & 0xfff) == 0x410) ||
136     + ((mmp_chip_id & 0xfff) == 0x610));
137     }
138     #else
139     #define cpu_is_mmp2() (0)
140     diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
141     index a134d8a13d00..11d699af30ed 100644
142     --- a/arch/arm/mm/cache-v7.S
143     +++ b/arch/arm/mm/cache-v7.S
144     @@ -359,14 +359,16 @@ v7_dma_inv_range:
145     ALT_UP(W(nop))
146     #endif
147     mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
148     + addne r0, r0, r2
149    
150     tst r1, r3
151     bic r1, r1, r3
152     mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line
153     -1:
154     - mcr p15, 0, r0, c7, c6, 1 @ invalidate D / U line
155     - add r0, r0, r2
156     cmp r0, r1
157     +1:
158     + mcrlo p15, 0, r0, c7, c6, 1 @ invalidate D / U line
159     + addlo r0, r0, r2
160     + cmplo r0, r1
161     blo 1b
162     dsb st
163     ret lr
164     diff --git a/arch/arm/mm/cache-v7m.S b/arch/arm/mm/cache-v7m.S
165     index 816a7e44e6f1..d29927740a19 100644
166     --- a/arch/arm/mm/cache-v7m.S
167     +++ b/arch/arm/mm/cache-v7m.S
168     @@ -73,9 +73,11 @@
169     /*
170     * dcimvac: Invalidate data cache line by MVA to PoC
171     */
172     -.macro dcimvac, rt, tmp
173     - v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC
174     +.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
175     +.macro dcimvac\c, rt, tmp
176     + v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
177     .endm
178     +.endr
179    
180     /*
181     * dccmvau: Clean data cache line by MVA to PoU
182     @@ -369,14 +371,16 @@ v7m_dma_inv_range:
183     tst r0, r3
184     bic r0, r0, r3
185     dccimvacne r0, r3
186     + addne r0, r0, r2
187     subne r3, r2, #1 @ restore r3, corrupted by v7m's dccimvac
188     tst r1, r3
189     bic r1, r1, r3
190     dccimvacne r1, r3
191     -1:
192     - dcimvac r0, r3
193     - add r0, r0, r2
194     cmp r0, r1
195     +1:
196     + dcimvaclo r0, r3
197     + addlo r0, r0, r2
198     + cmplo r0, r1
199     blo 1b
200     dsb st
201     ret lr
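
The two cache hunks above (v7 and v7M) fix the same corruption window in the DMA invalidate-range path: a cache line that only partially overlaps the range must be cleaned and invalidated, so the bytes outside the range survive, and only lines wholly inside the range may be discarded. The old loop could plain-invalidate a partially covered first line, throwing away dirty data adjacent to the DMA buffer; the addne/cmplo rework also stops the first line being operated on twice and makes an empty range fall straight through. The corrected control flow, sketched in C (dcache_clean_inv_line()/dcache_inv_line() are hypothetical stand-ins for the mcr cache maintenance operations):

    #include <stdint.h>

    void dcache_clean_inv_line(uintptr_t mva); /* mcr ... c7, c14, 1 */
    void dcache_inv_line(uintptr_t mva);       /* mcr ... c7, c6, 1  */

    void dma_inv_range(uintptr_t start, uintptr_t end, uintptr_t linesz)
    {
            uintptr_t mask = linesz - 1;

            if (start & mask) {              /* partial first line: preserve */
                    start &= ~mask;
                    dcache_clean_inv_line(start);
                    start += linesz;         /* the new addne step */
            }
            if (end & mask) {                /* partial last line: preserve */
                    end &= ~mask;
                    dcache_clean_inv_line(end);
            }
            while (start < end) {            /* whole lines: safe to discard */
                    dcache_inv_line(start);
                    start += linesz;
            }
    }
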
202     diff --git a/arch/powerpc/kernel/msi.c b/arch/powerpc/kernel/msi.c
203     index dab616a33b8d..f2197654be07 100644
204     --- a/arch/powerpc/kernel/msi.c
205     +++ b/arch/powerpc/kernel/msi.c
206     @@ -34,5 +34,10 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
207     {
208     struct pci_controller *phb = pci_bus_to_host(dev->bus);
209    
210     - phb->controller_ops.teardown_msi_irqs(dev);
211     + /*
212     + * We can be called even when arch_setup_msi_irqs() returns -ENOSYS,
213     + * so check the pointer again.
214     + */
215     + if (phb->controller_ops.teardown_msi_irqs)
216     + phb->controller_ops.teardown_msi_irqs(dev);
217     }
218     diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
219     index eaba08076030..9e78e963afb8 100644
220     --- a/arch/x86/include/asm/qspinlock.h
221     +++ b/arch/x86/include/asm/qspinlock.h
222     @@ -4,6 +4,29 @@
223     #include <asm/cpufeature.h>
224     #include <asm-generic/qspinlock_types.h>
225     #include <asm/paravirt.h>
226     +#include <asm/rmwcc.h>
227     +
228     +#define _Q_PENDING_LOOPS (1 << 9)
229     +
230     +#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
231     +
232     +static __always_inline bool __queued_RMW_btsl(struct qspinlock *lock)
233     +{
234     + GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter,
235     + "I", _Q_PENDING_OFFSET, "%0", c);
236     +}
237     +
238     +static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
239     +{
240     + u32 val = 0;
241     +
242     + if (__queued_RMW_btsl(lock))
243     + val |= _Q_PENDING_VAL;
244     +
245     + val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;
246     +
247     + return val;
248     +}
249    
250     #define queued_spin_unlock queued_spin_unlock
251     /**
252     @@ -14,7 +37,7 @@
253     */
254     static inline void native_queued_spin_unlock(struct qspinlock *lock)
255     {
256     - smp_store_release((u8 *)lock, 0);
257     + smp_store_release(&lock->locked, 0);
258     }
259    
260     #ifdef CONFIG_PARAVIRT_SPINLOCKS
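
queued_fetch_set_pending_acquire() sets the pending bit and returns the previous lock word with acquire ordering; the x86 version above uses a locked btsl so the test-and-set of that single bit is one RMW, then merges in a plain atomic_read() of the remaining fields, which is all the slowpath needs. Architectures without such an instruction use the full-word generic fallback added further down in this patch, atomic_fetch_or_acquire(). A compiler-builtin sketch of those semantics (names hypothetical):

    #include <stdint.h>

    #define Q_PENDING_VAL (1u << 8) /* pending bit lives in byte 1 */

    /* Returns the lock word as it was before pending was set; acquire
     * ordering keeps the caller's later loads from moving above it. */
    static inline uint32_t fetch_set_pending_acquire(uint32_t *val)
    {
            return __atomic_fetch_or(val, Q_PENDING_VAL, __ATOMIC_ACQUIRE);
    }
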
261     diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
262     index 9d55f9b6e167..fc75415ae971 100644
263     --- a/arch/x86/include/asm/qspinlock_paravirt.h
264     +++ b/arch/x86/include/asm/qspinlock_paravirt.h
265     @@ -21,8 +21,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath);
266     *
267     * void __pv_queued_spin_unlock(struct qspinlock *lock)
268     * {
269     - * struct __qspinlock *l = (void *)lock;
270     - * u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
271     + * u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
272     *
273     * if (likely(lockval == _Q_LOCKED_VAL))
274     * return;
275     diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c
276     index 5fdacb322ceb..c3e6be110b7d 100644
277     --- a/arch/x86/platform/efi/early_printk.c
278     +++ b/arch/x86/platform/efi/early_printk.c
279     @@ -179,7 +179,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
280     num--;
281     }
282    
283     - if (efi_x >= si->lfb_width) {
284     + if (efi_x + font->width > si->lfb_width) {
285     efi_x = 0;
286     efi_y += font->height;
287     }
288     diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
289     index a166359ad5d4..35be49f5791d 100644
290     --- a/drivers/ata/libata-core.c
291     +++ b/drivers/ata/libata-core.c
292     @@ -4476,6 +4476,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
293     { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
294     { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
295     { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
296     + { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
297     { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
298    
299     /*
300     diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c
301     index 61893fe73251..18b6c9b55b95 100644
302     --- a/drivers/clk/mmp/clk.c
303     +++ b/drivers/clk/mmp/clk.c
304     @@ -182,7 +182,7 @@ void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
305     pr_err("CLK %d has invalid pointer %p\n", id, clk);
306     return;
307     }
308     - if (id > unit->nr_clks) {
309     + if (id >= unit->nr_clks) {
310     pr_err("CLK %d is invalid\n", id);
311     return;
312     }
313     diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
314     index f2303da7fda7..465953c75320 100644
315     --- a/drivers/clk/mvebu/cp110-system-controller.c
316     +++ b/drivers/clk/mvebu/cp110-system-controller.c
317     @@ -172,11 +172,11 @@ static struct clk *cp110_of_clk_get(struct of_phandle_args *clkspec, void *data)
318     unsigned int idx = clkspec->args[1];
319    
320     if (type == CP110_CLK_TYPE_CORE) {
321     - if (idx > CP110_MAX_CORE_CLOCKS)
322     + if (idx >= CP110_MAX_CORE_CLOCKS)
323     return ERR_PTR(-EINVAL);
324     return clk_data->clks[idx];
325     } else if (type == CP110_CLK_TYPE_GATABLE) {
326     - if (idx > CP110_MAX_GATABLE_CLOCKS)
327     + if (idx >= CP110_MAX_GATABLE_CLOCKS)
328     return ERR_PTR(-EINVAL);
329     return clk_data->clks[CP110_MAX_CORE_CLOCKS + idx];
330     }
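
The two clock hunks above fix the same off-by-one: for an array of N entries the valid indices are 0..N-1, so an index equal to N must be rejected, which requires ">=" rather than ">". In miniature (a kernel-style sketch; NR_CLKS and clk_get_by_id() are hypothetical):

    #define NR_CLKS 32
    static struct clk *clks[NR_CLKS];

    static struct clk *clk_get_by_id(unsigned int id)
    {
            /* id == NR_CLKS is one past the end and must be rejected */
            if (id >= NR_CLKS)
                    return ERR_PTR(-EINVAL);
            return clks[id];
    }
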
331     diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
332     index 7a86e24e2687..5e0d3e561b04 100644
333     --- a/drivers/gpu/drm/ast/ast_fb.c
334     +++ b/drivers/gpu/drm/ast/ast_fb.c
335     @@ -286,6 +286,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
336     {
337     struct ast_framebuffer *afb = &afbdev->afb;
338    
339     + drm_crtc_force_disable_all(dev);
340     drm_fb_helper_unregister_fbi(&afbdev->helper);
341     drm_fb_helper_release_fbi(&afbdev->helper);
342    
343     diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
344     index fd11be6b23b9..62bcc770a181 100644
345     --- a/drivers/gpu/drm/i915/intel_lrc.c
346     +++ b/drivers/gpu/drm/i915/intel_lrc.c
347     @@ -386,8 +386,13 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
348     * may not be visible to the HW prior to the completion of the UC
349     * register write and that we may begin execution from the context
350     * before its image is complete leading to invalid PD chasing.
351     + *
352     + * Furthermore, Braswell, at least, wants a full mb to be sure that
353     + * the writes are coherent in memory (visible to the GPU) prior to
354     + * execution, and not just visible to other CPUs (as is the result of
355     + * wmb).
356     */
357     - wmb();
358     + mb();
359     return ce->lrc_desc;
360     }
361    
362     diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
363     index 73bae382eac3..5c58a98f67c0 100644
364     --- a/drivers/gpu/drm/msm/msm_atomic.c
365     +++ b/drivers/gpu/drm/msm/msm_atomic.c
366     @@ -98,7 +98,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
367     if (old_state->legacy_cursor_update)
368     continue;
369    
370     + if (drm_crtc_vblank_get(crtc))
371     + continue;
372     +
373     kms->funcs->wait_for_crtc_commit_done(kms, crtc);
374     +
375     + drm_crtc_vblank_put(crtc);
376     }
377     }
378    
379     diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
380     index f2033ab36f37..8c8cbe837e61 100644
381     --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
382     +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
383     @@ -478,11 +478,6 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
384     return 0;
385     }
386    
387     -static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
388     -{
389     - rockchip_drm_platform_remove(pdev);
390     -}
391     -
392     static const struct of_device_id rockchip_drm_dt_ids[] = {
393     { .compatible = "rockchip,display-subsystem", },
394     { /* sentinel */ },
395     @@ -492,7 +487,6 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
396     static struct platform_driver rockchip_drm_platform_driver = {
397     .probe = rockchip_drm_platform_probe,
398     .remove = rockchip_drm_platform_remove,
399     - .shutdown = rockchip_drm_platform_shutdown,
400     .driver = {
401     .name = "rockchip-drm",
402     .of_match_table = rockchip_drm_dt_ids,
403     diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
404     index 4351a9343058..96a6d5df9b26 100644
405     --- a/drivers/i2c/busses/i2c-axxia.c
406     +++ b/drivers/i2c/busses/i2c-axxia.c
407     @@ -74,8 +74,7 @@
408     MST_STATUS_ND)
409     #define MST_STATUS_ERR (MST_STATUS_NAK | \
410     MST_STATUS_AL | \
411     - MST_STATUS_IP | \
412     - MST_STATUS_TSS)
413     + MST_STATUS_IP)
414     #define MST_TX_BYTES_XFRD 0x50
415     #define MST_RX_BYTES_XFRD 0x54
416     #define SCL_HIGH_PERIOD 0x80
417     @@ -241,7 +240,7 @@ static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev)
418     */
419     if (c <= 0 || c > I2C_SMBUS_BLOCK_MAX) {
420     idev->msg_err = -EPROTO;
421     - i2c_int_disable(idev, ~0);
422     + i2c_int_disable(idev, ~MST_STATUS_TSS);
423     complete(&idev->msg_complete);
424     break;
425     }
426     @@ -299,14 +298,19 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
427    
428     if (status & MST_STATUS_SCC) {
429     /* Stop completed */
430     - i2c_int_disable(idev, ~0);
431     + i2c_int_disable(idev, ~MST_STATUS_TSS);
432     complete(&idev->msg_complete);
433     } else if (status & MST_STATUS_SNS) {
434     /* Transfer done */
435     - i2c_int_disable(idev, ~0);
436     + i2c_int_disable(idev, ~MST_STATUS_TSS);
437     if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
438     axxia_i2c_empty_rx_fifo(idev);
439     complete(&idev->msg_complete);
440     + } else if (status & MST_STATUS_TSS) {
441     + /* Transfer timeout */
442     + idev->msg_err = -ETIMEDOUT;
443     + i2c_int_disable(idev, ~MST_STATUS_TSS);
444     + complete(&idev->msg_complete);
445     } else if (unlikely(status & MST_STATUS_ERR)) {
446     /* Transfer error */
447     i2c_int_disable(idev, ~0);
448     @@ -339,10 +343,10 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
449     u32 rx_xfer, tx_xfer;
450     u32 addr_1, addr_2;
451     unsigned long time_left;
452     + unsigned int wt_value;
453    
454     idev->msg = msg;
455     idev->msg_xfrd = 0;
456     - idev->msg_err = 0;
457     reinit_completion(&idev->msg_complete);
458    
459     if (i2c_m_ten(msg)) {
460     @@ -382,9 +386,18 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
461     else if (axxia_i2c_fill_tx_fifo(idev) != 0)
462     int_mask |= MST_STATUS_TFL;
463    
464     + wt_value = WT_VALUE(readl(idev->base + WAIT_TIMER_CONTROL));
465     + /* Disable wait timer temporarily */

466     + writel(wt_value, idev->base + WAIT_TIMER_CONTROL);
467     + /* Check if timeout error happened */
468     + if (idev->msg_err)
469     + goto out;
470     +
471     /* Start manual mode */
472     writel(CMD_MANUAL, idev->base + MST_COMMAND);
473    
474     + writel(WT_EN | wt_value, idev->base + WAIT_TIMER_CONTROL);
475     +
476     i2c_int_enable(idev, int_mask);
477    
478     time_left = wait_for_completion_timeout(&idev->msg_complete,
479     @@ -395,13 +408,15 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
480     if (readl(idev->base + MST_COMMAND) & CMD_BUSY)
481     dev_warn(idev->dev, "busy after xfer\n");
482    
483     - if (time_left == 0)
484     + if (time_left == 0) {
485     idev->msg_err = -ETIMEDOUT;
486     -
487     - if (idev->msg_err == -ETIMEDOUT)
488     i2c_recover_bus(&idev->adapter);
489     + axxia_i2c_init(idev);
490     + }
491    
492     - if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO)
493     +out:
494     + if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO &&
495     + idev->msg_err != -ETIMEDOUT)
496     axxia_i2c_init(idev);
497    
498     return idev->msg_err;
499     @@ -409,7 +424,7 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
500    
501     static int axxia_i2c_stop(struct axxia_i2c_dev *idev)
502     {
503     - u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC;
504     + u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC | MST_STATUS_TSS;
505     unsigned long time_left;
506    
507     reinit_completion(&idev->msg_complete);
508     @@ -436,6 +451,9 @@ axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
509     int i;
510     int ret = 0;
511    
512     + idev->msg_err = 0;
513     + i2c_int_enable(idev, MST_STATUS_TSS);
514     +
515     for (i = 0; ret == 0 && i < num; ++i)
516     ret = axxia_i2c_xfer_msg(idev, &msgs[i]);
517    
518     diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
519     index efefcfa24a4c..d2178f701b41 100644
520     --- a/drivers/i2c/busses/i2c-scmi.c
521     +++ b/drivers/i2c/busses/i2c-scmi.c
522     @@ -364,6 +364,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
523     {
524     struct acpi_smbus_cmi *smbus_cmi;
525     const struct acpi_device_id *id;
526     + int ret;
527    
528     smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL);
529     if (!smbus_cmi)
530     @@ -385,8 +386,10 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
531     acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1,
532     acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL);
533    
534     - if (smbus_cmi->cap_info == 0)
535     + if (smbus_cmi->cap_info == 0) {
536     + ret = -ENODEV;
537     goto err;
538     + }
539    
540     snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name),
541     "SMBus CMI adapter %s",
542     @@ -397,7 +400,8 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
543     smbus_cmi->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
544     smbus_cmi->adapter.dev.parent = &device->dev;
545    
546     - if (i2c_add_adapter(&smbus_cmi->adapter)) {
547     + ret = i2c_add_adapter(&smbus_cmi->adapter);
548     + if (ret) {
549     dev_err(&device->dev, "Couldn't register adapter!\n");
550     goto err;
551     }
552     @@ -407,7 +411,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
553     err:
554     kfree(smbus_cmi);
555     device->driver_data = NULL;
556     - return -EIO;
557     + return ret;
558     }
559    
560     static int acpi_smbus_cmi_remove(struct acpi_device *device)
561     diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
562     index 0c5d3a99468e..b20025a5a8d9 100644
563     --- a/drivers/ide/pmac.c
564     +++ b/drivers/ide/pmac.c
565     @@ -920,6 +920,7 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
566     struct device_node *root = of_find_node_by_path("/");
567     const char *model = of_get_property(root, "model", NULL);
568    
569     + of_node_put(root);
570     /* Get cable type from device-tree. */
571     if (cable && !strncmp(cable, "80-", 3)) {
572     /* Some drives fail to detect 80c cable in PowerBook */
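
This hunk, and the display7seg, envctrl, and suncore hunks later in the patch, all plug the same reference leak: of_find_node_by_path() and its relatives return the device-tree node with its refcount raised, and every successful lookup must eventually be balanced with of_node_put(). The pattern, as the hunk applies it:

    struct device_node *root = of_find_node_by_path("/"); /* takes a ref */
    const char *model = of_get_property(root, "model", NULL);

    of_node_put(root); /* balance the lookup once the node is no longer needed */
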
573     diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
574     index 619475c7d761..4c111162d552 100644
575     --- a/drivers/infiniband/hw/hfi1/user_sdma.c
576     +++ b/drivers/infiniband/hw/hfi1/user_sdma.c
577     @@ -151,10 +151,6 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
578     #define SDMA_REQ_HAVE_AHG 1
579     #define SDMA_REQ_HAS_ERROR 2
580    
581     -#define SDMA_PKT_Q_INACTIVE BIT(0)
582     -#define SDMA_PKT_Q_ACTIVE BIT(1)
583     -#define SDMA_PKT_Q_DEFERRED BIT(2)
584     -
585     /*
586     * Maximum retry attempts to submit a TX request
587     * before putting the process to sleep.
588     @@ -408,7 +404,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
589     pq->ctxt = uctxt->ctxt;
590     pq->subctxt = fd->subctxt;
591     pq->n_max_reqs = hfi1_sdma_comp_ring_size;
592     - pq->state = SDMA_PKT_Q_INACTIVE;
593     atomic_set(&pq->n_reqs, 0);
594     init_waitqueue_head(&pq->wait);
595     atomic_set(&pq->n_locked, 0);
596     @@ -491,7 +486,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
597     /* Wait until all requests have been freed. */
598     wait_event_interruptible(
599     pq->wait,
600     - (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
601     + !atomic_read(&pq->n_reqs));
602     kfree(pq->reqs);
603     kfree(pq->req_in_use);
604     kmem_cache_destroy(pq->txreq_cache);
605     @@ -527,6 +522,13 @@ static u8 dlid_to_selector(u16 dlid)
606     return mapping[hash];
607     }
608    
609     +/**
610     + * hfi1_user_sdma_process_request() - Process and start a user sdma request
611     + * @fp: valid file pointer
612     + * @iovec: array of io vectors to process
613     + * @dim: overall iovec array size
614     + * @count: number of io vector array entries processed
615     + */
616     int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
617     unsigned long dim, unsigned long *count)
618     {
619     @@ -768,20 +770,12 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
620     }
621    
622     set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
623     + pq->state = SDMA_PKT_Q_ACTIVE;
624     /* Send the first N packets in the request to buy us some time */
625     ret = user_sdma_send_pkts(req, pcount);
626     if (unlikely(ret < 0 && ret != -EBUSY))
627     goto free_req;
628    
629     - /*
630     - * It is possible that the SDMA engine would have processed all the
631     - * submitted packets by the time we get here. Therefore, only set
632     - * packet queue state to ACTIVE if there are still uncompleted
633     - * requests.
634     - */
635     - if (atomic_read(&pq->n_reqs))
636     - xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
637     -
638     /*
639     * This is a somewhat blocking send implementation.
640     * The driver will block the caller until all packets of the
641     @@ -1526,10 +1520,8 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
642    
643     static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
644     {
645     - if (atomic_dec_and_test(&pq->n_reqs)) {
646     - xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
647     + if (atomic_dec_and_test(&pq->n_reqs))
648     wake_up(&pq->wait);
649     - }
650     }
651    
652     static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
653     diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
654     index 39001714f551..09dd843a13de 100644
655     --- a/drivers/infiniband/hw/hfi1/user_sdma.h
656     +++ b/drivers/infiniband/hw/hfi1/user_sdma.h
657     @@ -53,6 +53,11 @@
658    
659     extern uint extended_psn;
660    
661     +enum pkt_q_sdma_state {
662     + SDMA_PKT_Q_ACTIVE,
663     + SDMA_PKT_Q_DEFERRED,
664     +};
665     +
666     struct hfi1_user_sdma_pkt_q {
667     struct list_head list;
668     unsigned ctxt;
669     @@ -65,7 +70,7 @@ struct hfi1_user_sdma_pkt_q {
670     struct user_sdma_request *reqs;
671     unsigned long *req_in_use;
672     struct iowait busy;
673     - unsigned state;
674     + enum pkt_q_sdma_state state;
675     wait_queue_head_t wait;
676     unsigned long unpinned;
677     struct mmu_rb_handler *handler;
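
The hfi1 rework drops the INACTIVE/ACTIVE/DEFERRED state word as the wait condition and keys teardown directly off the outstanding-request counter: pq_update() wakes the waiter on atomic_dec_and_test(), and free_queues() simply waits for n_reqs to reach zero. That closes the race where all requests completed before the submitter xchg()ed the state to ACTIVE, leaving the waiter stuck forever. A minimal sketch of the counter-as-condition pattern (names hypothetical):

    static atomic_t n_reqs = ATOMIC_INIT(0);
    static DECLARE_WAIT_QUEUE_HEAD(wait);

    static void req_submit(void)
    {
            atomic_inc(&n_reqs);
            /* ... hand the request to the SDMA engine ... */
    }

    static void req_complete(void) /* completion callback */
    {
            if (atomic_dec_and_test(&n_reqs))
                    wake_up(&wait); /* last outstanding request */
    }

    static void queue_teardown(void)
    {
            /* no separate state word that can lag behind the counter */
            wait_event(wait, !atomic_read(&n_reqs));
    }
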
678     diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
679     index 6639b2b8528a..f78c464899db 100644
680     --- a/drivers/input/keyboard/omap4-keypad.c
681     +++ b/drivers/input/keyboard/omap4-keypad.c
682     @@ -60,8 +60,18 @@
683    
684     /* OMAP4 values */
685     #define OMAP4_VAL_IRQDISABLE 0x0
686     -#define OMAP4_VAL_DEBOUNCINGTIME 0x7
687     -#define OMAP4_VAL_PVT 0x7
688     +
689     +/*
690     + * Errata i689: If a key is released for a time shorter than debounce time,
691     + * the keyboard will idle and never detect the key release. The workaround
692     + * is to use at least a 12ms debounce time. See omap5432 TRM chapter
693     + * "26.4.6.2 Keyboard Controller Timer" for more information.
694     + */
695     +#define OMAP4_KEYPAD_PTV_DIV_128 0x6
696     +#define OMAP4_KEYPAD_DEBOUNCINGTIME_MS(dbms, ptv) \
697     + ((((dbms) * 1000) / ((1 << ((ptv) + 1)) * (1000000 / 32768))) - 1)
698     +#define OMAP4_VAL_DEBOUNCINGTIME_16MS \
699     + OMAP4_KEYPAD_DEBOUNCINGTIME_MS(16, OMAP4_KEYPAD_PTV_DIV_128)
700    
701     enum {
702     KBD_REVISION_OMAP4 = 0,
703     @@ -181,9 +191,9 @@ static int omap4_keypad_open(struct input_dev *input)
704    
705     kbd_writel(keypad_data, OMAP4_KBD_CTRL,
706     OMAP4_DEF_CTRL_NOSOFTMODE |
707     - (OMAP4_VAL_PVT << OMAP4_DEF_CTRL_PTV_SHIFT));
708     + (OMAP4_KEYPAD_PTV_DIV_128 << OMAP4_DEF_CTRL_PTV_SHIFT));
709     kbd_writel(keypad_data, OMAP4_KBD_DEBOUNCINGTIME,
710     - OMAP4_VAL_DEBOUNCINGTIME);
711     + OMAP4_VAL_DEBOUNCINGTIME_16MS);
712     /* clear pending interrupts */
713     kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
714     kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
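
The new debounce macro converts milliseconds into a register value for the 32768 Hz functional clock prescaled by 2^(ptv+1). Working the 16 ms, PTV_DIV_128 case through with the macro's integer arithmetic:

    1000000 / 32768         = 30 us per 32 kHz clock (integer division)
    (1 << (6 + 1)) * 30     = 3840 us per prescaled tick
    (16 * 1000) / 3840 - 1  = 3, the value written to the register
    (3 + 1) * 128 / 32768 s = 15.625 ms of actual debounce time

15.625 ms sits comfortably above the 12 ms minimum that erratum i689 requires, where the old OMAP4_VAL_DEBOUNCINGTIME of 0x7 did not.
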
715     diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
716     index be3c49fa7382..a4bf14e21b5e 100644
717     --- a/drivers/mmc/host/omap.c
718     +++ b/drivers/mmc/host/omap.c
719     @@ -104,6 +104,7 @@ struct mmc_omap_slot {
720     unsigned int vdd;
721     u16 saved_con;
722     u16 bus_mode;
723     + u16 power_mode;
724     unsigned int fclk_freq;
725    
726     struct tasklet_struct cover_tasklet;
727     @@ -1157,7 +1158,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
728     struct mmc_omap_slot *slot = mmc_priv(mmc);
729     struct mmc_omap_host *host = slot->host;
730     int i, dsor;
731     - int clk_enabled;
732     + int clk_enabled, init_stream;
733    
734     mmc_omap_select_slot(slot, 0);
735    
736     @@ -1167,6 +1168,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
737     slot->vdd = ios->vdd;
738    
739     clk_enabled = 0;
740     + init_stream = 0;
741     switch (ios->power_mode) {
742     case MMC_POWER_OFF:
743     mmc_omap_set_power(slot, 0, ios->vdd);
744     @@ -1174,13 +1176,17 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
745     case MMC_POWER_UP:
746     /* Cannot touch dsor yet, just power up MMC */
747     mmc_omap_set_power(slot, 1, ios->vdd);
748     + slot->power_mode = ios->power_mode;
749     goto exit;
750     case MMC_POWER_ON:
751     mmc_omap_fclk_enable(host, 1);
752     clk_enabled = 1;
753     dsor |= 1 << 11;
754     + if (slot->power_mode != MMC_POWER_ON)
755     + init_stream = 1;
756     break;
757     }
758     + slot->power_mode = ios->power_mode;
759    
760     if (slot->bus_mode != ios->bus_mode) {
761     if (slot->pdata->set_bus_mode != NULL)
762     @@ -1196,7 +1202,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
763     for (i = 0; i < 2; i++)
764     OMAP_MMC_WRITE(host, CON, dsor);
765     slot->saved_con = dsor;
766     - if (ios->power_mode == MMC_POWER_ON) {
767     + if (init_stream) {
768     /* worst case at 400kHz, 80 cycles makes 200 microsecs */
769     int usecs = 250;
770    
771     @@ -1234,6 +1240,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
772     slot->host = host;
773     slot->mmc = mmc;
774     slot->id = id;
775     + slot->power_mode = MMC_POWER_UNDEFINED;
776     slot->pdata = &host->pdata->slots[id];
777    
778     host->slots[id] = slot;
779     diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
780     index 6dcc42d79cab..1e2ee97b9240 100644
781     --- a/drivers/net/bonding/bond_3ad.c
782     +++ b/drivers/net/bonding/bond_3ad.c
783     @@ -2050,6 +2050,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
784     aggregator->aggregator_identifier);
785    
786     /* Tell the partner that this port is not suitable for aggregation */
787     + port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
788     + port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
789     + port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
790     port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
791     __update_lacpdu_from_port(port);
792     ad_lacpdu_send(port);
793     diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
794     index 7ce36dbd9b62..a3607d083332 100644
795     --- a/drivers/net/dsa/mv88e6060.c
796     +++ b/drivers/net/dsa/mv88e6060.c
797     @@ -114,8 +114,7 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
798     /* Reset the switch. */
799     REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
800     GLOBAL_ATU_CONTROL_SWRESET |
801     - GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
802     - GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
803     + GLOBAL_ATU_CONTROL_LEARNDIS);
804    
805     /* Wait up to one second for reset to complete. */
806     timeout = jiffies + 1 * HZ;
807     @@ -140,13 +139,10 @@ static int mv88e6060_setup_global(struct dsa_switch *ds)
808     */
809     REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
810    
811     - /* Enable automatic address learning, set the address
812     - * database size to 1024 entries, and set the default aging
813     - * time to 5 minutes.
814     + /* Disable automatic address learning.
815     */
816     REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
817     - GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
818     - GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
819     + GLOBAL_ATU_CONTROL_LEARNDIS);
820    
821     return 0;
822     }
823     diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
824     index dafd9e1baba2..380c4a2f6516 100644
825     --- a/drivers/net/ethernet/freescale/fman/fman.c
826     +++ b/drivers/net/ethernet/freescale/fman/fman.c
827     @@ -2817,7 +2817,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
828     if (!muram_node) {
829     dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
830     __func__);
831     - goto fman_node_put;
832     + goto fman_free;
833     }
834    
835     err = of_address_to_resource(muram_node, 0,
836     @@ -2826,11 +2826,10 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
837     of_node_put(muram_node);
838     dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n",
839     __func__, err);
840     - goto fman_node_put;
841     + goto fman_free;
842     }
843    
844     of_node_put(muram_node);
845     - of_node_put(fm_node);
846    
847     err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
848     if (err < 0) {
849     diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
850     index 5098e7f21987..a0eb4e4bc525 100644
851     --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
852     +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
853     @@ -5,7 +5,7 @@
854     config MLX4_EN
855     tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
856     depends on MAY_USE_DEVLINK
857     - depends on PCI
858     + depends on PCI && NETDEVICES && ETHERNET && INET
859     select MLX4_CORE
860     select PTP_1588_CLOCK
861     ---help---
862     diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
863     index 0852a1aad075..780acf23fd19 100644
864     --- a/drivers/net/wireless/mac80211_hwsim.c
865     +++ b/drivers/net/wireless/mac80211_hwsim.c
866     @@ -3403,16 +3403,16 @@ static int __init init_mac80211_hwsim(void)
867     if (err)
868     goto out_unregister_pernet;
869    
870     + err = hwsim_init_netlink();
871     + if (err)
872     + goto out_unregister_driver;
873     +
874     hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
875     if (IS_ERR(hwsim_class)) {
876     err = PTR_ERR(hwsim_class);
877     - goto out_unregister_driver;
878     + goto out_exit_netlink;
879     }
880    
881     - err = hwsim_init_netlink();
882     - if (err < 0)
883     - goto out_unregister_driver;
884     -
885     for (i = 0; i < radios; i++) {
886     struct hwsim_new_radio_params param = { 0 };
887    
888     @@ -3518,6 +3518,8 @@ out_free_mon:
889     free_netdev(hwsim_mon);
890     out_free_radios:
891     mac80211_hwsim_free();
892     +out_exit_netlink:
893     + hwsim_exit_netlink();
894     out_unregister_driver:
895     platform_driver_unregister(&mac80211_hwsim_driver);
896     out_unregister_pernet:
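
The hwsim hunk moves hwsim_init_netlink() ahead of class_create() and, crucially, gives it a matching out_exit_netlink unwind label; previously no error label ever called hwsim_exit_netlink(), so any failure after the netlink init leaked the registration. The invariant being restored is the usual goto ladder: every successful init step gets an unwind label, and the labels run in exact reverse order of initialization. Schematically (step names hypothetical):

    static int __init mod_init(void)
    {
            int err;

            err = step_a_init();
            if (err)
                    return err;

            err = step_b_init();
            if (err)
                    goto out_a;      /* undo everything before step_b */

            err = step_c_init();
            if (err)
                    goto out_b;      /* undo in exact reverse order */

            return 0;

    out_b:
            step_b_exit();
    out_a:
            step_a_exit();
            return err;
    }
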
897     diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
898     index 2dfd877974d7..486393fa4f3e 100644
899     --- a/drivers/nvme/target/rdma.c
900     +++ b/drivers/nvme/target/rdma.c
901     @@ -524,6 +524,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
902     {
903     struct nvmet_rdma_rsp *rsp =
904     container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
905     + struct nvmet_rdma_queue *queue = cq->cq_context;
906    
907     nvmet_rdma_release_rsp(rsp);
908    
909     @@ -531,7 +532,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
910     wc->status != IB_WC_WR_FLUSH_ERR)) {
911     pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
912     wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
913     - nvmet_rdma_error_comp(rsp->queue);
914     + nvmet_rdma_error_comp(queue);
915     }
916     }
917    
918     diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
919     index a7c81e988656..383977ea3a3c 100644
920     --- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
921     +++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
922     @@ -568,7 +568,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
923     SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
924     SUNXI_FUNCTION(0x0, "gpio_in"),
925     SUNXI_FUNCTION(0x1, "gpio_out"),
926     - SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)), /* PH_EINT11 */
927     + SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PH_EINT11 */
928     };
929    
930     static const struct sunxi_pinctrl_desc sun8i_a83t_pinctrl_data = {
931     diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
932     index 3e8fd33c2576..71eee39520f0 100644
933     --- a/drivers/rtc/rtc-snvs.c
934     +++ b/drivers/rtc/rtc-snvs.c
935     @@ -47,49 +47,83 @@ struct snvs_rtc_data {
936     struct clk *clk;
937     };
938    
939     +/* Read 64 bit timer register, which could be in inconsistent state */
940     +static u64 rtc_read_lpsrt(struct snvs_rtc_data *data)
941     +{
942     + u32 msb, lsb;
943     +
944     + regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &msb);
945     + regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &lsb);
946     + return (u64)msb << 32 | lsb;
947     +}
948     +
949     +/* Read the secure real time counter, taking care to deal with the cases of the
950     + * counter updating while being read.
951     + */
952     static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
953     {
954     u64 read1, read2;
955     - u32 val;
956     + unsigned int timeout = 100;
957    
958     + /* As expected, the registers might update between the read of the LSB
959     + * reg and the MSB reg. It's also possible that one register might be
960     + * in partially modified state as well.
961     + */
962     + read1 = rtc_read_lpsrt(data);
963     do {
964     - regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &val);
965     - read1 = val;
966     - read1 <<= 32;
967     - regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &val);
968     - read1 |= val;
969     -
970     - regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &val);
971     - read2 = val;
972     - read2 <<= 32;
973     - regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &val);
974     - read2 |= val;
975     - } while (read1 != read2);
976     + read2 = read1;
977     + read1 = rtc_read_lpsrt(data);
978     + } while (read1 != read2 && --timeout);
979     + if (!timeout)
980     + dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
981    
982     /* Convert 47-bit counter to 32-bit raw second count */
983     return (u32) (read1 >> CNTR_TO_SECS_SH);
984     }
985    
986     -static void rtc_write_sync_lp(struct snvs_rtc_data *data)
987     +/* Just read the lsb from the counter, dealing with inconsistent state */
988     +static int rtc_read_lp_counter_lsb(struct snvs_rtc_data *data, u32 *lsb)
989     {
990     - u32 count1, count2, count3;
991     - int i;
992     -
993     - /* Wait for 3 CKIL cycles */
994     - for (i = 0; i < 3; i++) {
995     - do {
996     - regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
997     - regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count2);
998     - } while (count1 != count2);
999     -
1000     - /* Now wait until counter value changes */
1001     - do {
1002     - do {
1003     - regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count2);
1004     - regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count3);
1005     - } while (count2 != count3);
1006     - } while (count3 == count1);
1007     + u32 count1, count2;
1008     + unsigned int timeout = 100;
1009     +
1010     + regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
1011     + do {
1012     + count2 = count1;
1013     + regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
1014     + } while (count1 != count2 && --timeout);
1015     + if (!timeout) {
1016     + dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
1017     + return -ETIMEDOUT;
1018     }
1019     +
1020     + *lsb = count1;
1021     + return 0;
1022     +}
1023     +
1024     +static int rtc_write_sync_lp(struct snvs_rtc_data *data)
1025     +{
1026     + u32 count1, count2;
1027     + u32 elapsed;
1028     + unsigned int timeout = 1000;
1029     + int ret;
1030     +
1031     + ret = rtc_read_lp_counter_lsb(data, &count1);
1032     + if (ret)
1033     + return ret;
1034     +
1035     + /* Wait for 3 CKIL cycles, about 61.0-91.5 µs */
1036     + do {
1037     + ret = rtc_read_lp_counter_lsb(data, &count2);
1038     + if (ret)
1039     + return ret;
1040     + elapsed = count2 - count1; /* wrap around _is_ handled! */
1041     + } while (elapsed < 3 && --timeout);
1042     + if (!timeout) {
1043     + dev_err(&data->rtc->dev, "Timeout waiting for LPSRT Counter to change\n");
1044     + return -ETIMEDOUT;
1045     + }
1046     + return 0;
1047     }
1048    
1049     static int snvs_rtc_enable(struct snvs_rtc_data *data, bool enable)
1050     @@ -173,9 +207,7 @@ static int snvs_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
1051     (SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN),
1052     enable ? (SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN) : 0);
1053    
1054     - rtc_write_sync_lp(data);
1055     -
1056     - return 0;
1057     + return rtc_write_sync_lp(data);
1058     }
1059    
1060     static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
1061     @@ -183,10 +215,14 @@ static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
1062     struct snvs_rtc_data *data = dev_get_drvdata(dev);
1063     struct rtc_time *alrm_tm = &alrm->time;
1064     unsigned long time;
1065     + int ret;
1066    
1067     rtc_tm_to_time(alrm_tm, &time);
1068    
1069     regmap_update_bits(data->regmap, data->offset + SNVS_LPCR, SNVS_LPCR_LPTA_EN, 0);
1070     + ret = rtc_write_sync_lp(data);
1071     + if (ret)
1072     + return ret;
1073     regmap_write(data->regmap, data->offset + SNVS_LPTAR, time);
1074    
1075     /* Clear alarm interrupt status bit */
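
Both new SNVS helpers use the same lock-free idiom for a counter the hardware updates underneath the CPU: read, re-read, and only trust the value once two consecutive reads agree, with a bounded retry count in place of the old unbounded do/while. The shape of it, sketched against a hypothetical split 64-bit register (read_msb_lsb() stands in for the pair of regmap_read() calls; struct rtc_priv is a placeholder):

    static int read_counter_stable(struct rtc_priv *data, u64 *out)
    {
            unsigned int timeout = 100;
            u64 a, b;

            a = read_msb_lsb(data); /* may be torn mid-update */
            do {
                    b = a;
                    a = read_msb_lsb(data);
            } while (a != b && --timeout); /* settle: two equal reads */

            if (!timeout)
                    return -ETIMEDOUT; /* counter never stabilized */
            *out = a;
            return 0;
    }
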
1076     diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
1077     index 33fbe8249fd5..044cffbc45e8 100644
1078     --- a/drivers/sbus/char/display7seg.c
1079     +++ b/drivers/sbus/char/display7seg.c
1080     @@ -221,6 +221,7 @@ static int d7s_probe(struct platform_device *op)
1081     dev_set_drvdata(&op->dev, p);
1082     d7s_device = p;
1083     err = 0;
1084     + of_node_put(opts);
1085    
1086     out:
1087     return err;
1088     diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
1089     index 5609b602c54d..baa9b322520b 100644
1090     --- a/drivers/sbus/char/envctrl.c
1091     +++ b/drivers/sbus/char/envctrl.c
1092     @@ -910,8 +910,10 @@ static void envctrl_init_i2c_child(struct device_node *dp,
1093     for (len = 0; len < PCF8584_MAX_CHANNELS; ++len) {
1094     pchild->mon_type[len] = ENVCTRL_NOMON;
1095     }
1096     + of_node_put(root_node);
1097     return;
1098     }
1099     + of_node_put(root_node);
1100     }
1101    
1102     /* Get the monitor channels. */
1103     diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
1104     index cc8f2a7c2463..c79743de48f9 100644
1105     --- a/drivers/scsi/libiscsi.c
1106     +++ b/drivers/scsi/libiscsi.c
1107     @@ -2414,8 +2414,8 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc)
1108     failed:
1109     ISCSI_DBG_EH(session,
1110     "failing session reset: Could not log back into "
1111     - "%s, %s [age %d]\n", session->targetname,
1112     - conn->persistent_address, session->age);
1113     + "%s [age %d]\n", session->targetname,
1114     + session->age);
1115     spin_unlock_bh(&session->frwd_lock);
1116     mutex_unlock(&session->eh_mutex);
1117     return FAILED;
1118     diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
1119     index 874e9f085326..fcfbe2dcd025 100644
1120     --- a/drivers/scsi/vmw_pvscsi.c
1121     +++ b/drivers/scsi/vmw_pvscsi.c
1122     @@ -1233,8 +1233,6 @@ static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
1123    
1124     static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
1125     {
1126     - pvscsi_shutdown_intr(adapter);
1127     -
1128     if (adapter->workqueue)
1129     destroy_workqueue(adapter->workqueue);
1130    
1131     @@ -1563,6 +1561,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1132     out_reset_adapter:
1133     ll_adapter_reset(adapter);
1134     out_release_resources:
1135     + pvscsi_shutdown_intr(adapter);
1136     pvscsi_release_resources(adapter);
1137     scsi_host_put(host);
1138     out_disable_device:
1139     @@ -1571,6 +1570,7 @@ out_disable_device:
1140     return error;
1141    
1142     out_release_resources_and_disable:
1143     + pvscsi_shutdown_intr(adapter);
1144     pvscsi_release_resources(adapter);
1145     goto out_disable_device;
1146     }
1147     diff --git a/drivers/tty/serial/suncore.c b/drivers/tty/serial/suncore.c
1148     index 127472bd6a7c..209f314745ab 100644
1149     --- a/drivers/tty/serial/suncore.c
1150     +++ b/drivers/tty/serial/suncore.c
1151     @@ -111,6 +111,7 @@ void sunserial_console_termios(struct console *con, struct device_node *uart_dp)
1152     mode = of_get_property(dp, mode_prop, NULL);
1153     if (!mode)
1154     mode = "9600,8,n,1,-";
1155     + of_node_put(dp);
1156     }
1157    
1158     cflag = CREAD | HUPCL | CLOCAL;
1159     diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
1160     index f800f89068db..46f966d7c328 100644
1161     --- a/drivers/vhost/vsock.c
1162     +++ b/drivers/vhost/vsock.c
1163     @@ -559,13 +559,21 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
1164     * executing.
1165     */
1166    
1167     - if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
1168     - sock_set_flag(sk, SOCK_DONE);
1169     - vsk->peer_shutdown = SHUTDOWN_MASK;
1170     - sk->sk_state = SS_UNCONNECTED;
1171     - sk->sk_err = ECONNRESET;
1172     - sk->sk_error_report(sk);
1173     - }
1174     + /* If the peer is still valid, no need to reset connection */
1175     + if (vhost_vsock_get(vsk->remote_addr.svm_cid))
1176     + return;
1177     +
1178     + /* If the close timeout is pending, let it expire. This avoids races
1179     + * with the timeout callback.
1180     + */
1181     + if (vsk->close_work_scheduled)
1182     + return;
1183     +
1184     + sock_set_flag(sk, SOCK_DONE);
1185     + vsk->peer_shutdown = SHUTDOWN_MASK;
1186     + sk->sk_state = SS_UNCONNECTED;
1187     + sk->sk_err = ECONNRESET;
1188     + sk->sk_error_report(sk);
1189     }
1190    
1191     static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
1192     diff --git a/fs/aio.c b/fs/aio.c
1193     index b1170a7affe2..c3fc80294397 100644
1194     --- a/fs/aio.c
1195     +++ b/fs/aio.c
1196     @@ -40,6 +40,7 @@
1197     #include <linux/ramfs.h>
1198     #include <linux/percpu-refcount.h>
1199     #include <linux/mount.h>
1200     +#include <linux/nospec.h>
1201    
1202     #include <asm/kmap_types.h>
1203     #include <asm/uaccess.h>
1204     @@ -1071,6 +1072,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
1205     if (!table || id >= table->nr)
1206     goto out;
1207    
1208     + id = array_index_nospec(id, table->nr);
1209     ctx = rcu_dereference(table->table[id]);
1210     if (ctx && ctx->user_id == ctx_id) {
1211     if (percpu_ref_tryget_live(&ctx->users))
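
The aio hunk is standard Spectre-v1 hardening: the "id >= table->nr" branch is the architectural bounds check, but a mispredicting CPU can speculatively run the lookup with an out-of-range id and leak data through the cache side channel. array_index_nospec() clamps the index with a data dependency that speculation cannot skip, so both lines are needed together. The generic pattern (read_entry() is a hypothetical example):

    #include <linux/nospec.h>

    /* 'idx' comes from userspace; 'arr' has 'size' elements. */
    static u64 read_entry(const u64 *arr, size_t size, size_t idx)
    {
            if (idx >= size)
                    return 0; /* architectural check, can be mispredicted */
            idx = array_index_nospec(idx, size); /* speculation-safe clamp */
            return arr[idx];
    }
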
1212     diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
1213     index e7b478b49985..8bef27b8f85d 100644
1214     --- a/fs/cifs/Kconfig
1215     +++ b/fs/cifs/Kconfig
1216     @@ -111,7 +111,7 @@ config CIFS_XATTR
1217    
1218     config CIFS_POSIX
1219     bool "CIFS POSIX Extensions"
1220     - depends on CIFS_XATTR
1221     + depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY && CIFS_XATTR
1222     help
1223     Enabling this option will cause the cifs client to attempt to
1224     negotiate a newer dialect with servers, such as Samba 3.0.5
1225     diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
1226     index 1ab91124a93e..53f0012ace42 100644
1227     --- a/fs/nfs/direct.c
1228     +++ b/fs/nfs/direct.c
1229     @@ -98,8 +98,11 @@ struct nfs_direct_req {
1230     struct pnfs_ds_commit_info ds_cinfo; /* Storage for cinfo */
1231     struct work_struct work;
1232     int flags;
1233     + /* for write */
1234     #define NFS_ODIRECT_DO_COMMIT (1) /* an unstable reply was received */
1235     #define NFS_ODIRECT_RESCHED_WRITES (2) /* write verification failed */
1236     + /* for read */
1237     +#define NFS_ODIRECT_SHOULD_DIRTY (3) /* dirty user-space page after read */
1238     struct nfs_writeverf verf; /* unstable write verifier */
1239     };
1240    
1241     @@ -422,7 +425,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
1242     struct nfs_page *req = nfs_list_entry(hdr->pages.next);
1243     struct page *page = req->wb_page;
1244    
1245     - if (!PageCompound(page) && bytes < hdr->good_bytes)
1246     + if (!PageCompound(page) && bytes < hdr->good_bytes &&
1247     + (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
1248     set_page_dirty(page);
1249     bytes += req->wb_bytes;
1250     nfs_list_remove_request(req);
1251     @@ -597,6 +601,9 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
1252     if (!is_sync_kiocb(iocb))
1253     dreq->iocb = iocb;
1254    
1255     + if (iter_is_iovec(iter))
1256     + dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
1257     +
1258     nfs_start_io_direct(inode);
1259    
1260     NFS_I(inode)->read_io += count;
1261     diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
1262     index 034acd0c4956..d10f1e7d6ba8 100644
1263     --- a/include/asm-generic/qspinlock_types.h
1264     +++ b/include/asm-generic/qspinlock_types.h
1265     @@ -29,13 +29,41 @@
1266     #endif
1267    
1268     typedef struct qspinlock {
1269     - atomic_t val;
1270     + union {
1271     + atomic_t val;
1272     +
1273     + /*
1274     + * By using the whole 2nd least significant byte for the
1275     + * pending bit, we can allow better optimization of the lock
1276     + * acquisition for the pending bit holder.
1277     + */
1278     +#ifdef __LITTLE_ENDIAN
1279     + struct {
1280     + u8 locked;
1281     + u8 pending;
1282     + };
1283     + struct {
1284     + u16 locked_pending;
1285     + u16 tail;
1286     + };
1287     +#else
1288     + struct {
1289     + u16 tail;
1290     + u16 locked_pending;
1291     + };
1292     + struct {
1293     + u8 reserved[2];
1294     + u8 pending;
1295     + u8 locked;
1296     + };
1297     +#endif
1298     + };
1299     } arch_spinlock_t;
1300    
1301     /*
1302     * Initializer
1303     */
1304     -#define __ARCH_SPIN_LOCK_UNLOCKED { ATOMIC_INIT(0) }
1305     +#define __ARCH_SPIN_LOCK_UNLOCKED { { .val = ATOMIC_INIT(0) } }
1306    
1307     /*
1308     * Bitfields in the atomic value:
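
The union added above overlays the 32-bit atomic lock word with byte and halfword views so code can store to one field directly, as in smp_store_release(&lock->locked, 0) in the x86 hunk earlier, instead of doing a full-word RMW; the two endian variants exist so that 'locked' always sits in the least significant byte of 'val'. A freestanding sketch of the layout plus the size invariant it relies on (C11 anonymous structs; 'qsl' is a hypothetical name):

    #include <stdint.h>

    struct qsl {
            union {
                    uint32_t val;
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
                    struct { uint8_t  locked; uint8_t pending; };
                    struct { uint16_t locked_pending; uint16_t tail; };
    #else
                    struct { uint16_t tail; uint16_t locked_pending; };
                    struct { uint8_t  reserved[2]; uint8_t pending; uint8_t locked; };
    #endif
            };
    };

    /* All views must tile exactly one 32-bit word. */
    _Static_assert(sizeof(struct qsl) == sizeof(uint32_t),
                   "qspinlock views must overlay a single word");
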
1309     diff --git a/include/linux/compat.h b/include/linux/compat.h
1310     index d8535a430caf..fab35daf8759 100644
1311     --- a/include/linux/compat.h
1312     +++ b/include/linux/compat.h
1313     @@ -67,6 +67,9 @@ typedef struct compat_sigaltstack {
1314     compat_size_t ss_size;
1315     } compat_stack_t;
1316     #endif
1317     +#ifndef COMPAT_MINSIGSTKSZ
1318     +#define COMPAT_MINSIGSTKSZ MINSIGSTKSZ
1319     +#endif
1320    
1321     #define compat_jiffies_to_clock_t(x) \
1322     (((unsigned long)(x) * COMPAT_USER_HZ) / HZ)
1323     diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
1324     index 1438b7396cb4..335c00209f74 100644
1325     --- a/kernel/bpf/verifier.c
1326     +++ b/kernel/bpf/verifier.c
1327     @@ -2919,6 +2919,9 @@ static int do_check(struct bpf_verifier_env *env)
1328     goto process_bpf_exit;
1329     }
1330    
1331     + if (signal_pending(current))
1332     + return -EAGAIN;
1333     +
1334     if (need_resched())
1335     cond_resched();
1336    
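
The verifier walks every reachable instruction, and a pathological program can make that walk long; the two checks above keep it well-behaved. signal_pending() lets the user interrupt verification (the call returns -EAGAIN), while cond_resched() yields the CPU voluntarily. The general shape for any long-running kernel loop (process() and item[] are hypothetical):

    for (i = 0; i < n_items; i++) {
            process(&item[i]);

            if (signal_pending(current))
                    return -EAGAIN; /* user asked us to stop */

            if (need_resched())
                    cond_resched(); /* be fair to other tasks */
    }
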
1337     diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
1338     index a72f5df643f8..0ed478e10071 100644
1339     --- a/kernel/locking/qspinlock.c
1340     +++ b/kernel/locking/qspinlock.c
1341     @@ -75,6 +75,18 @@
1342     #define MAX_NODES 4
1343     #endif
1344    
1345     +/*
1346     + * The pending bit spinning loop count.
1347     + * This heuristic is used to limit the number of lockword accesses
1348     + * made by atomic_cond_read_relaxed when waiting for the lock to
1349     + * transition out of the "== _Q_PENDING_VAL" state. We don't spin
1350     + * indefinitely because there's no guarantee that we'll make forward
1351     + * progress.
1352     + */
1353     +#ifndef _Q_PENDING_LOOPS
1354     +#define _Q_PENDING_LOOPS 1
1355     +#endif
1356     +
1357     /*
1358     * Per-CPU queue node structures; we can never have more than 4 nested
1359     * contexts: task, softirq, hardirq, nmi.
1360     @@ -113,41 +125,18 @@ static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
1361    
1362     #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
1363    
1364     -/*
1365     - * By using the whole 2nd least significant byte for the pending bit, we
1366     - * can allow better optimization of the lock acquisition for the pending
1367     - * bit holder.
1368     +#if _Q_PENDING_BITS == 8
1369     +/**
1370     + * clear_pending - clear the pending bit.
1371     + * @lock: Pointer to queued spinlock structure
1372     *
1373     - * This internal structure is also used by the set_locked function which
1374     - * is not restricted to _Q_PENDING_BITS == 8.
1375     + * *,1,* -> *,0,*
1376     */
1377     -struct __qspinlock {
1378     - union {
1379     - atomic_t val;
1380     -#ifdef __LITTLE_ENDIAN
1381     - struct {
1382     - u8 locked;
1383     - u8 pending;
1384     - };
1385     - struct {
1386     - u16 locked_pending;
1387     - u16 tail;
1388     - };
1389     -#else
1390     - struct {
1391     - u16 tail;
1392     - u16 locked_pending;
1393     - };
1394     - struct {
1395     - u8 reserved[2];
1396     - u8 pending;
1397     - u8 locked;
1398     - };
1399     -#endif
1400     - };
1401     -};
1402     +static __always_inline void clear_pending(struct qspinlock *lock)
1403     +{
1404     + WRITE_ONCE(lock->pending, 0);
1405     +}
1406    
1407     -#if _Q_PENDING_BITS == 8
1408     /**
1409     * clear_pending_set_locked - take ownership and clear the pending bit.
1410     * @lock: Pointer to queued spinlock structure
1411     @@ -158,9 +147,7 @@ struct __qspinlock {
1412     */
1413     static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
1414     {
1415     - struct __qspinlock *l = (void *)lock;
1416     -
1417     - WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL);
1418     + WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
1419     }
1420    
1421     /*
1422     @@ -169,24 +156,33 @@ static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
1423     * @tail : The new queue tail code word
1424     * Return: The previous queue tail code word
1425     *
1426     - * xchg(lock, tail)
1427     + * xchg(lock, tail), which heads an address dependency
1428     *
1429     * p,*,* -> n,*,* ; prev = xchg(lock, node)
1430     */
1431     static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
1432     {
1433     - struct __qspinlock *l = (void *)lock;
1434     -
1435     /*
1436     * Use release semantics to make sure that the MCS node is properly
1437     * initialized before changing the tail code.
1438     */
1439     - return (u32)xchg_release(&l->tail,
1440     + return (u32)xchg_release(&lock->tail,
1441     tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
1442     }
1443    
1444     #else /* _Q_PENDING_BITS == 8 */
1445    
1446     +/**
1447     + * clear_pending - clear the pending bit.
1448     + * @lock: Pointer to queued spinlock structure
1449     + *
1450     + * *,1,* -> *,0,*
1451     + */
1452     +static __always_inline void clear_pending(struct qspinlock *lock)
1453     +{
1454     + atomic_andnot(_Q_PENDING_VAL, &lock->val);
1455     +}
1456     +
1457     /**
1458     * clear_pending_set_locked - take ownership and clear the pending bit.
1459     * @lock: Pointer to queued spinlock structure
1460     @@ -228,6 +224,20 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
1461     }
1462     #endif /* _Q_PENDING_BITS == 8 */
1463    
1464     +/**
1465     + * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
1466     + * @lock : Pointer to queued spinlock structure
1467     + * Return: The previous lock value
1468     + *
1469     + * *,*,* -> *,1,*
1470     + */
1471     +#ifndef queued_fetch_set_pending_acquire
1472     +static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
1473     +{
1474     + return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
1475     +}
1476     +#endif
1477     +
1478     /**
1479     * set_locked - Set the lock bit and own the lock
1480     * @lock: Pointer to queued spinlock structure
1481     @@ -236,9 +246,7 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
1482     */
1483     static __always_inline void set_locked(struct qspinlock *lock)
1484     {
1485     - struct __qspinlock *l = (void *)lock;
1486     -
1487     - WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
1488     + WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
1489     }
1490    
1491    
1492     @@ -410,7 +418,7 @@ EXPORT_SYMBOL(queued_spin_unlock_wait);
1493     void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
1494     {
1495     struct mcs_spinlock *prev, *next, *node;
1496     - u32 new, old, tail;
1497     + u32 old, tail;
1498     int idx;
1499    
1500     BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
1501     @@ -422,65 +430,58 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
1502     return;
1503    
1504     /*
1505     - * wait for in-progress pending->locked hand-overs
1506     + * Wait for in-progress pending->locked hand-overs with a bounded
1507     + * number of spins so that we guarantee forward progress.
1508     *
1509     * 0,1,0 -> 0,0,1
1510     */
1511     if (val == _Q_PENDING_VAL) {
1512     - while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL)
1513     - cpu_relax();
1514     + int cnt = _Q_PENDING_LOOPS;
1515     + val = smp_cond_load_acquire(&lock->val.counter,
1516     + (VAL != _Q_PENDING_VAL) || !cnt--);
1517     }
1518    
1519     + /*
1520     + * If we observe any contention, queue.
1521     + */
1522     + if (val & ~_Q_LOCKED_MASK)
1523     + goto queue;
1524     +
1525     /*
1526     * trylock || pending
1527     *
1528     * 0,0,0 -> 0,0,1 ; trylock
1529     * 0,0,1 -> 0,1,1 ; pending
1530     */
1531     - for (;;) {
1532     - /*
1533     - * If we observe any contention; queue.
1534     - */
1535     - if (val & ~_Q_LOCKED_MASK)
1536     - goto queue;
1537     -
1538     - new = _Q_LOCKED_VAL;
1539     - if (val == new)
1540     - new |= _Q_PENDING_VAL;
1541     -
1542     - /*
1543     - * Acquire semantic is required here as the function may
1544     - * return immediately if the lock was free.
1545     - */
1546     - old = atomic_cmpxchg_acquire(&lock->val, val, new);
1547     - if (old == val)
1548     - break;
1549     -
1550     - val = old;
1551     - }
1552     + val = queued_fetch_set_pending_acquire(lock);
1553    
1554     /*
1555     - * we won the trylock
1556     + * If we observe any contention, undo and queue.
1557     */
1558     - if (new == _Q_LOCKED_VAL)
1559     - return;
1560     + if (unlikely(val & ~_Q_LOCKED_MASK)) {
1561     + if (!(val & _Q_PENDING_MASK))
1562     + clear_pending(lock);
1563     + goto queue;
1564     + }
1565    
1566     /*
1567     - * we're pending, wait for the owner to go away.
1568     + * We're pending, wait for the owner to go away.
1569     *
1570     - * *,1,1 -> *,1,0
1571     + * 0,1,1 -> 0,1,0
1572     *
1573     * this wait loop must be a load-acquire such that we match the
1574     * store-release that clears the locked bit and create lock
1575     - * sequentiality; this is because not all clear_pending_set_locked()
1576     - * implementations imply full barriers.
1577     + * sequentiality; this is because not all
1578     + * clear_pending_set_locked() implementations imply full
1579     + * barriers.
1580     */
1581     - smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_MASK));
1582     + if (val & _Q_LOCKED_MASK)
1583     + smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_MASK));
1584    
1585     /*
1586     * take ownership and clear the pending bit.
1587     *
1588     - * *,1,0 -> *,0,1
1589     + * 0,1,0 -> 0,0,1
1590     */
1591     clear_pending_set_locked(lock);
1592     return;
1593     @@ -532,16 +533,15 @@ queue:
1594     */
1595     if (old & _Q_TAIL_MASK) {
1596     prev = decode_tail(old);
1597     +
1598     /*
1599     - * The above xchg_tail() is also a load of @lock which generates,
1600     - * through decode_tail(), a pointer.
1601     - *
1602     - * The address dependency matches the RELEASE of xchg_tail()
1603     - * such that the access to @prev must happen after.
1604     + * We must ensure that the stores to @node are observed before
1605     + * the write to prev->next. The address dependency from
1606     + * xchg_tail is not sufficient to ensure this because the read
1607     + * component of xchg_tail is unordered with respect to the
1608     + * initialisation of @node.
1609     */
1610     - smp_read_barrier_depends();
1611     -
1612     - WRITE_ONCE(prev->next, node);
1613     + smp_store_release(&prev->next, node);
1614    
1615     pv_wait_node(node, prev);
1616     arch_mcs_spin_lock_contended(&node->locked);
1617     @@ -588,30 +588,27 @@ locked:
1618     * claim the lock:
1619     *
1620     * n,0,0 -> 0,0,1 : lock, uncontended
1621     - * *,0,0 -> *,0,1 : lock, contended
1622     + * *,*,0 -> *,*,1 : lock, contended
1623     *
1624     - * If the queue head is the only one in the queue (lock value == tail),
1625     - * clear the tail code and grab the lock. Otherwise, we only need
1626     - * to grab the lock.
1627     + * If the queue head is the only one in the queue (lock value == tail)
1628     + * and nobody is pending, clear the tail code and grab the lock.
1629     + * Otherwise, we only need to grab the lock.
1630     */
1631     - for (;;) {
1632     - /* In the PV case we might already have _Q_LOCKED_VAL set */
1633     - if ((val & _Q_TAIL_MASK) != tail) {
1634     - set_locked(lock);
1635     - break;
1636     - }
1637     +
1638     + /* In the PV case we might already have _Q_LOCKED_VAL set */
1639     + if ((val & _Q_TAIL_MASK) == tail) {
1640     /*
1641     * The smp_cond_load_acquire() call above has provided the
1642     - * necessary acquire semantics required for locking. At most
1643     - * two iterations of this loop may be ran.
1644     + * necessary acquire semantics required for locking.
1645     */
1646     old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
1647     if (old == val)
1648     - goto release; /* No contention */
1649     -
1650     - val = old;
1651     + goto release; /* No contention */
1652     }
1653    
1654     + /* Either somebody is queued behind us or _Q_PENDING_VAL is set */
1655     + set_locked(lock);
1656     +
1657     /*
1658     * contended path; wait for next if not observed yet, release.
1659     */
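
The qspinlock hunk above replaces the old cmpxchg retry loop with a single fetch_or: set the pending bit unconditionally, then undo it only if contention was observed. A minimal user-space sketch of that fast path, assuming C11 atomics and the kernel's bit layout (locked in bits 0-7, pending in bit 8); the demo_* names are invented for illustration and this is not the kernel implementation:

	#include <stdatomic.h>
	#include <stdbool.h>

	#define DEMO_LOCKED_MASK  0x00ffu
	#define DEMO_LOCKED_VAL   0x0001u
	#define DEMO_PENDING_VAL  0x0100u

	static _Atomic unsigned int demo_lock;

	/* Models queued_fetch_set_pending_acquire() plus the undo-and-queue check. */
	static bool demo_trylock_or_pend(void)
	{
		unsigned int val = atomic_fetch_or_explicit(&demo_lock,
							    DEMO_PENDING_VAL,
							    memory_order_acquire);

		if (val & ~DEMO_LOCKED_MASK) {
			/* Contended: only undo pending if we were the one to set it. */
			if (!(val & DEMO_PENDING_VAL))
				atomic_fetch_and_explicit(&demo_lock, ~DEMO_PENDING_VAL,
							  memory_order_relaxed);
			return false;	/* caller falls back to the MCS queue */
		}

		/* At most the owner holds it: wait for the locked byte to clear. */
		while (atomic_load_explicit(&demo_lock, memory_order_acquire) &
		       DEMO_LOCKED_MASK)
			;

		/* *,1,0 -> *,0,1: the kernel does a halfword store; arithmetic works
		 * in this sketch because pending == 1 and locked == 0 here. */
		atomic_fetch_add_explicit(&demo_lock,
					  DEMO_LOCKED_VAL - DEMO_PENDING_VAL,
					  memory_order_relaxed);
		return true;
	}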
1660     diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
1661     index e3b5520005db..af2a24d484aa 100644
1662     --- a/kernel/locking/qspinlock_paravirt.h
1663     +++ b/kernel/locking/qspinlock_paravirt.h
1664     @@ -69,10 +69,8 @@ struct pv_node {
1665     #define queued_spin_trylock(l) pv_queued_spin_steal_lock(l)
1666     static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
1667     {
1668     - struct __qspinlock *l = (void *)lock;
1669     -
1670     if (!(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
1671     - (cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
1672     + (cmpxchg(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
1673     qstat_inc(qstat_pv_lock_stealing, true);
1674     return true;
1675     }
1676     @@ -87,16 +85,7 @@ static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
1677     #if _Q_PENDING_BITS == 8
1678     static __always_inline void set_pending(struct qspinlock *lock)
1679     {
1680     - struct __qspinlock *l = (void *)lock;
1681     -
1682     - WRITE_ONCE(l->pending, 1);
1683     -}
1684     -
1685     -static __always_inline void clear_pending(struct qspinlock *lock)
1686     -{
1687     - struct __qspinlock *l = (void *)lock;
1688     -
1689     - WRITE_ONCE(l->pending, 0);
1690     + WRITE_ONCE(lock->pending, 1);
1691     }
1692    
1693     /*
1694     @@ -106,10 +95,8 @@ static __always_inline void clear_pending(struct qspinlock *lock)
1695     */
1696     static __always_inline int trylock_clear_pending(struct qspinlock *lock)
1697     {
1698     - struct __qspinlock *l = (void *)lock;
1699     -
1700     - return !READ_ONCE(l->locked) &&
1701     - (cmpxchg(&l->locked_pending, _Q_PENDING_VAL, _Q_LOCKED_VAL)
1702     + return !READ_ONCE(lock->locked) &&
1703     + (cmpxchg(&lock->locked_pending, _Q_PENDING_VAL, _Q_LOCKED_VAL)
1704     == _Q_PENDING_VAL);
1705     }
1706     #else /* _Q_PENDING_BITS == 8 */
1707     @@ -118,11 +105,6 @@ static __always_inline void set_pending(struct qspinlock *lock)
1708     atomic_or(_Q_PENDING_VAL, &lock->val);
1709     }
1710    
1711     -static __always_inline void clear_pending(struct qspinlock *lock)
1712     -{
1713     - atomic_andnot(_Q_PENDING_VAL, &lock->val);
1714     -}
1715     -
1716     static __always_inline int trylock_clear_pending(struct qspinlock *lock)
1717     {
1718     int val = atomic_read(&lock->val);
1719     @@ -353,7 +335,6 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
1720     static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
1721     {
1722     struct pv_node *pn = (struct pv_node *)node;
1723     - struct __qspinlock *l = (void *)lock;
1724    
1725     /*
1726     * If the vCPU is indeed halted, advance its state to match that of
1727     @@ -372,7 +353,7 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
1728     * the hash table later on at unlock time, no atomic instruction is
1729     * needed.
1730     */
1731     - WRITE_ONCE(l->locked, _Q_SLOW_VAL);
1732     + WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
1733     (void)pv_hash(lock, pn);
1734     }
1735    
1736     @@ -387,7 +368,6 @@ static u32
1737     pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
1738     {
1739     struct pv_node *pn = (struct pv_node *)node;
1740     - struct __qspinlock *l = (void *)lock;
1741     struct qspinlock **lp = NULL;
1742     int waitcnt = 0;
1743     int loop;
1744     @@ -438,13 +418,13 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
1745     *
1746     * Matches the smp_rmb() in __pv_queued_spin_unlock().
1747     */
1748     - if (xchg(&l->locked, _Q_SLOW_VAL) == 0) {
1749     + if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
1750     /*
1751     * The lock was free and now we own the lock.
1752     * Change the lock value back to _Q_LOCKED_VAL
1753     * and unhash the table.
1754     */
1755     - WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
1756     + WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
1757     WRITE_ONCE(*lp, NULL);
1758     goto gotlock;
1759     }
1760     @@ -452,7 +432,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
1761     WRITE_ONCE(pn->state, vcpu_hashed);
1762     qstat_inc(qstat_pv_wait_head, true);
1763     qstat_inc(qstat_pv_wait_again, waitcnt);
1764     - pv_wait(&l->locked, _Q_SLOW_VAL);
1765     + pv_wait(&lock->locked, _Q_SLOW_VAL);
1766    
1767     /*
1768     * Because of lock stealing, the queue head vCPU may not be
1769     @@ -477,7 +457,6 @@ gotlock:
1770     __visible void
1771     __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
1772     {
1773     - struct __qspinlock *l = (void *)lock;
1774     struct pv_node *node;
1775    
1776     if (unlikely(locked != _Q_SLOW_VAL)) {
1777     @@ -506,7 +485,7 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
1778     * Now that we have a reference to the (likely) blocked pv_node,
1779     * release the lock.
1780     */
1781     - smp_store_release(&l->locked, 0);
1782     + smp_store_release(&lock->locked, 0);
1783    
1784     /*
1785     * At this point the memory pointed at by lock can be freed/reused,
1786     @@ -532,7 +511,6 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
1787     #ifndef __pv_queued_spin_unlock
1788     __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
1789     {
1790     - struct __qspinlock *l = (void *)lock;
1791     u8 locked;
1792    
1793     /*
1794     @@ -540,7 +518,7 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
1795     * unhash. Otherwise it would be possible to have multiple @lock
1796     * entries, which would be BAD.
1797     */
1798     - locked = cmpxchg_release(&l->locked, _Q_LOCKED_VAL, 0);
1799     + locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
1800     if (likely(locked == _Q_LOCKED_VAL))
1801     return;
1802    
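
The paravirt hunks above are mechanical fallout from folding the old __qspinlock shadow union into struct qspinlock itself, so lock->locked, lock->pending and lock->locked_pending can be named directly without the (void *) cast. A hedged little-endian sketch of the layout this relies on; demo_qspinlock is invented here, the real definition lives in the qspinlock type headers:

	#include <assert.h>
	#include <stdint.h>

	struct demo_qspinlock {
		union {
			uint32_t val;			/* the atomic word */
			struct {			/* little-endian view */
				uint8_t  locked;	/* bits  0- 7 */
				uint8_t  pending;	/* bits  8-15 */
				uint16_t tail;		/* bits 16-31 */
			};
		};
	};

	int main(void)
	{
		struct demo_qspinlock l = { .val = 0 };

		_Static_assert(sizeof(struct demo_qspinlock) == sizeof(uint32_t),
			       "byte views must alias the word exactly");
		l.locked = 1;	/* models WRITE_ONCE(lock->locked, _Q_LOCKED_VAL) */
		assert(l.val == 0x00000001u);	/* on little-endian hosts */
		return 0;
	}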
1803     diff --git a/kernel/signal.c b/kernel/signal.c
1804     index 424306163edc..049929a5f4ce 100644
1805     --- a/kernel/signal.c
1806     +++ b/kernel/signal.c
1807     @@ -3116,7 +3116,8 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
1808     }
1809    
1810     static int
1811     -do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
1812     +do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp,
1813     + size_t min_ss_size)
1814     {
1815     stack_t oss;
1816     int error;
1817     @@ -3155,9 +3156,8 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
1818     ss_size = 0;
1819     ss_sp = NULL;
1820     } else {
1821     - error = -ENOMEM;
1822     - if (ss_size < MINSIGSTKSZ)
1823     - goto out;
1824     + if (unlikely(ss_size < min_ss_size))
1825     + return -ENOMEM;
1826     }
1827    
1828     current->sas_ss_sp = (unsigned long) ss_sp;
1829     @@ -3180,12 +3180,14 @@ out:
1830     }
1831     SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
1832     {
1833     - return do_sigaltstack(uss, uoss, current_user_stack_pointer());
1834     + return do_sigaltstack(uss, uoss, current_user_stack_pointer(),
1835     + MINSIGSTKSZ);
1836     }
1837    
1838     int restore_altstack(const stack_t __user *uss)
1839     {
1840     - int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
1841     + int err = do_sigaltstack(uss, NULL, current_user_stack_pointer(),
1842     + MINSIGSTKSZ);
1843     /* squash all but EFAULT for now */
1844     return err == -EFAULT ? err : 0;
1845     }
1846     @@ -3226,7 +3228,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
1847     set_fs(KERNEL_DS);
1848     ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
1849     (stack_t __force __user *) &uoss,
1850     - compat_user_stack_pointer());
1851     + compat_user_stack_pointer(),
1852     + COMPAT_MINSIGSTKSZ);
1853     set_fs(seg);
1854     if (ret >= 0 && uoss_ptr) {
1855     if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
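
The signal.c change threads the minimum stack size through do_sigaltstack() as a parameter, so the compat entry point can enforce COMPAT_MINSIGSTKSZ instead of the native MINSIGSTKSZ. A standalone sketch of the check; the sizes below are illustrative, not the kernel's per-arch values:

	#include <errno.h>
	#include <stddef.h>
	#include <stdio.h>

	#define DEMO_MINSIGSTKSZ	 2048	/* e.g. native ABI */
	#define DEMO_COMPAT_MINSIGSTKSZ	 1024	/* e.g. 32-bit compat ABI */

	/* Models the reworked do_sigaltstack() size check. */
	static int demo_check_altstack(size_t ss_size, size_t min_ss_size)
	{
		return ss_size < min_ss_size ? -ENOMEM : 0;
	}

	int main(void)
	{
		/* 1536 bytes: acceptable for a compat task, too small natively. */
		printf("native: %d compat: %d\n",
		       demo_check_altstack(1536, DEMO_MINSIGSTKSZ),
		       demo_check_altstack(1536, DEMO_COMPAT_MINSIGSTKSZ));
		return 0;
	}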
1856     diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
1857     index ef4f16e81283..1407ed20ea93 100644
1858     --- a/kernel/time/timer_list.c
1859     +++ b/kernel/time/timer_list.c
1860     @@ -399,7 +399,7 @@ static int __init init_timer_list_procfs(void)
1861     {
1862     struct proc_dir_entry *pe;
1863    
1864     - pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
1865     + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
1866     if (!pe)
1867     return -ENOMEM;
1868     return 0;
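
The 0444 -> 0400 switch stops unprivileged users reading /proc/timer_list, which exposes kernel pointers. A quick user-space probe to confirm the tightened mode, purely illustrative:

	#include <stdio.h>
	#include <sys/stat.h>

	int main(void)
	{
		struct stat st;

		if (stat("/proc/timer_list", &st) == 0)
			printf("mode: %04o\n",
			       (unsigned int)(st.st_mode & 07777)); /* expect 0400 */
		return 0;
	}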
1869     diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
1870     index 2884fe01cb54..8f4227d4cd39 100644
1871     --- a/kernel/trace/ftrace.c
1872     +++ b/kernel/trace/ftrace.c
1873     @@ -4836,6 +4836,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops)
1874     if (ops->flags & FTRACE_OPS_FL_ENABLED)
1875     ftrace_shutdown(ops, 0);
1876     ops->flags |= FTRACE_OPS_FL_DELETED;
1877     + ftrace_free_filter(ops);
1878     mutex_unlock(&ftrace_lock);
1879     }
1880    
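
The one-liner in ftrace.c plugs a leak: an ftrace_ops marked FTRACE_OPS_FL_DELETED previously kept its filter hashes alive forever. The pattern, reduced to a user-space sketch with invented demo_* names, is to release owned allocations at the same point the object is retired:

	#include <stdlib.h>

	struct demo_ops {
		unsigned int flags;
		char *filter;		/* owned allocation */
	};

	#define DEMO_FL_DELETED 0x1u

	static void demo_destroy_filter_files(struct demo_ops *ops)
	{
		ops->flags |= DEMO_FL_DELETED;
		free(ops->filter);	/* leaked before the fix */
		ops->filter = NULL;
	}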
1881     diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
1882     index 8819944bbcbf..7e6971ba9541 100644
1883     --- a/kernel/trace/trace_events_trigger.c
1884     +++ b/kernel/trace/trace_events_trigger.c
1885     @@ -742,8 +742,10 @@ int set_trigger_filter(char *filter_str,
1886    
1887     /* The filter is for the 'trigger' event, not the triggered event */
1888     ret = create_event_filter(file->event_call, filter_str, false, &filter);
1889     - if (ret)
1890     - goto out;
1891     + /*
1892     + * If create_event_filter() fails, the filter still needs to be
1893     + * freed, which the calling code will do via data->filter.
1894     + */
1895     assign:
1896     tmp = rcu_access_pointer(data->filter);
1897    
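
set_trigger_filter() now falls through to the assignment even when create_event_filter() fails, so the (possibly NULL) result is published and the old filter is always handed back for freeing. A C11 model of that publish-and-return-old step; demo_* names are invented, and the kernel uses RCU rather than a bare exchange:

	#include <stdatomic.h>
	#include <stddef.h>

	struct demo_filter { int prog; };

	static _Atomic(struct demo_filter *) demo_active;

	/* Publish @new -- which may be NULL when parsing failed -- and hand
	 * the previous filter back to the caller for freeing. */
	static struct demo_filter *demo_swap_filter(struct demo_filter *new)
	{
		return atomic_exchange_explicit(&demo_active, new,
						memory_order_release);
	}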
1898     diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c
1899     index 245900b98c8e..222c8010bda0 100644
1900     --- a/lib/interval_tree_test.c
1901     +++ b/lib/interval_tree_test.c
1902     @@ -1,27 +1,38 @@
1903     #include <linux/module.h>
1904     +#include <linux/moduleparam.h>
1905     #include <linux/interval_tree.h>
1906     #include <linux/random.h>
1907     +#include <linux/slab.h>
1908     #include <asm/timex.h>
1909    
1910     -#define NODES 100
1911     -#define PERF_LOOPS 100000
1912     -#define SEARCHES 100
1913     -#define SEARCH_LOOPS 10000
1914     +#define __param(type, name, init, msg) \
1915     + static type name = init; \
1916     + module_param(name, type, 0444); \
1917     + MODULE_PARM_DESC(name, msg);
1918     +
1919     +__param(int, nnodes, 100, "Number of nodes in the interval tree");
1920     +__param(int, perf_loops, 1000, "Number of iterations modifying the tree");
1921     +
1922     +__param(int, nsearches, 100, "Number of searches to the interval tree");
1923     +__param(int, search_loops, 1000, "Number of iterations searching the tree");
1924     +__param(bool, search_all, false, "Searches will iterate all nodes in the tree");
1925     +
1926     +__param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint");
1927    
1928     static struct rb_root root = RB_ROOT;
1929     -static struct interval_tree_node nodes[NODES];
1930     -static u32 queries[SEARCHES];
1931     +static struct interval_tree_node *nodes = NULL;
1932     +static u32 *queries = NULL;
1933    
1934     static struct rnd_state rnd;
1935    
1936     static inline unsigned long
1937     -search(unsigned long query, struct rb_root *root)
1938     +search(struct rb_root *root, unsigned long start, unsigned long last)
1939     {
1940     struct interval_tree_node *node;
1941     unsigned long results = 0;
1942    
1943     - for (node = interval_tree_iter_first(root, query, query); node;
1944     - node = interval_tree_iter_next(node, query, query))
1945     + for (node = interval_tree_iter_first(root, start, last); node;
1946     + node = interval_tree_iter_next(node, start, last))
1947     results++;
1948     return results;
1949     }
1950     @@ -29,19 +40,22 @@ search(unsigned long query, struct rb_root *root)
1951     static void init(void)
1952     {
1953     int i;
1954     - for (i = 0; i < NODES; i++) {
1955     - u32 a = prandom_u32_state(&rnd);
1956     - u32 b = prandom_u32_state(&rnd);
1957     - if (a <= b) {
1958     - nodes[i].start = a;
1959     - nodes[i].last = b;
1960     - } else {
1961     - nodes[i].start = b;
1962     - nodes[i].last = a;
1963     - }
1964     +
1965     + for (i = 0; i < nnodes; i++) {
1966     + u32 b = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
1967     + u32 a = (prandom_u32_state(&rnd) >> 4) % b;
1968     +
1969     + nodes[i].start = a;
1970     + nodes[i].last = b;
1971     }
1972     - for (i = 0; i < SEARCHES; i++)
1973     - queries[i] = prandom_u32_state(&rnd);
1974     +
1975     + /*
1976     + * Limit the search scope to what the user defined.
1977     + * Otherwise we are merely measuring empty walks,
1978     + * which is pointless.
1979     + */
1980     + for (i = 0; i < nsearches; i++)
1981     + queries[i] = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
1982     }
1983    
1984     static int interval_tree_test_init(void)
1985     @@ -50,6 +64,16 @@ static int interval_tree_test_init(void)
1986     unsigned long results;
1987     cycles_t time1, time2, time;
1988    
1989     + nodes = kmalloc(nnodes * sizeof(struct interval_tree_node), GFP_KERNEL);
1990     + if (!nodes)
1991     + return -ENOMEM;
1992     +
1993     + queries = kmalloc(nsearches * sizeof(int), GFP_KERNEL);
1994     + if (!queries) {
1995     + kfree(nodes);
1996     + return -ENOMEM;
1997     + }
1998     +
1999     printk(KERN_ALERT "interval tree insert/remove");
2000    
2001     prandom_seed_state(&rnd, 3141592653589793238ULL);
2002     @@ -57,39 +81,46 @@ static int interval_tree_test_init(void)
2003    
2004     time1 = get_cycles();
2005    
2006     - for (i = 0; i < PERF_LOOPS; i++) {
2007     - for (j = 0; j < NODES; j++)
2008     + for (i = 0; i < perf_loops; i++) {
2009     + for (j = 0; j < nnodes; j++)
2010     interval_tree_insert(nodes + j, &root);
2011     - for (j = 0; j < NODES; j++)
2012     + for (j = 0; j < nnodes; j++)
2013     interval_tree_remove(nodes + j, &root);
2014     }
2015    
2016     time2 = get_cycles();
2017     time = time2 - time1;
2018    
2019     - time = div_u64(time, PERF_LOOPS);
2020     + time = div_u64(time, perf_loops);
2021     printk(" -> %llu cycles\n", (unsigned long long)time);
2022    
2023     printk(KERN_ALERT "interval tree search");
2024    
2025     - for (j = 0; j < NODES; j++)
2026     + for (j = 0; j < nnodes; j++)
2027     interval_tree_insert(nodes + j, &root);
2028    
2029     time1 = get_cycles();
2030    
2031     results = 0;
2032     - for (i = 0; i < SEARCH_LOOPS; i++)
2033     - for (j = 0; j < SEARCHES; j++)
2034     - results += search(queries[j], &root);
2035     + for (i = 0; i < search_loops; i++)
2036     + for (j = 0; j < nsearches; j++) {
2037     + unsigned long start = search_all ? 0 : queries[j];
2038     + unsigned long last = search_all ? max_endpoint : queries[j];
2039     +
2040     + results += search(&root, start, last);
2041     + }
2042    
2043     time2 = get_cycles();
2044     time = time2 - time1;
2045    
2046     - time = div_u64(time, SEARCH_LOOPS);
2047     - results = div_u64(results, SEARCH_LOOPS);
2048     + time = div_u64(time, search_loops);
2049     + results = div_u64(results, search_loops);
2050     printk(" -> %llu cycles (%lu results)\n",
2051     (unsigned long long)time, results);
2052    
2053     + kfree(queries);
2054     + kfree(nodes);
2055     +
2056     return -EAGAIN; /* Fail will directly unload the module */
2057     }
2058    
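
The interval_tree_test rework trades fixed #defines for module parameters and clamps endpoints to max_endpoint so searches traverse populated ranges rather than empty walks. A user-space model of the new init(), with rand() standing in for prandom_u32_state(); note the b == 0 guard added here as an editorial precaution -- the patched code's "% b" would be undefined if b ever came out zero:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		const uint32_t max_endpoint = 1u << 20;	/* example bound */
		uint32_t b = ((uint32_t)rand() >> 4) % max_endpoint;
		uint32_t a = b ? ((uint32_t)rand() >> 4) % b : 0;

		printf("node interval: [%u, %u]\n", a, b); /* a <= b by construction */
		return 0;
	}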
2059     diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
2060     index 8b3c9dc88262..afedd3770562 100644
2061     --- a/lib/rbtree_test.c
2062     +++ b/lib/rbtree_test.c
2063     @@ -1,11 +1,18 @@
2064     #include <linux/module.h>
2065     +#include <linux/moduleparam.h>
2066     #include <linux/rbtree_augmented.h>
2067     #include <linux/random.h>
2068     +#include <linux/slab.h>
2069     #include <asm/timex.h>
2070    
2071     -#define NODES 100
2072     -#define PERF_LOOPS 100000
2073     -#define CHECK_LOOPS 100
2074     +#define __param(type, name, init, msg) \
2075     + static type name = init; \
2076     + module_param(name, type, 0444); \
2077     + MODULE_PARM_DESC(name, msg);
2078     +
2079     +__param(int, nnodes, 100, "Number of nodes in the rb-tree");
2080     +__param(int, perf_loops, 1000, "Number of iterations modifying the rb-tree");
2081     +__param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree");
2082    
2083     struct test_node {
2084     u32 key;
2085     @@ -17,7 +24,7 @@ struct test_node {
2086     };
2087    
2088     static struct rb_root root = RB_ROOT;
2089     -static struct test_node nodes[NODES];
2090     +static struct test_node *nodes = NULL;
2091    
2092     static struct rnd_state rnd;
2093    
2094     @@ -95,7 +102,7 @@ static void erase_augmented(struct test_node *node, struct rb_root *root)
2095     static void init(void)
2096     {
2097     int i;
2098     - for (i = 0; i < NODES; i++) {
2099     + for (i = 0; i < nnodes; i++) {
2100     nodes[i].key = prandom_u32_state(&rnd);
2101     nodes[i].val = prandom_u32_state(&rnd);
2102     }
2103     @@ -177,6 +184,10 @@ static int __init rbtree_test_init(void)
2104     int i, j;
2105     cycles_t time1, time2, time;
2106    
2107     + nodes = kmalloc(nnodes * sizeof(*nodes), GFP_KERNEL);
2108     + if (!nodes)
2109     + return -ENOMEM;
2110     +
2111     printk(KERN_ALERT "rbtree testing");
2112    
2113     prandom_seed_state(&rnd, 3141592653589793238ULL);
2114     @@ -184,27 +195,27 @@ static int __init rbtree_test_init(void)
2115    
2116     time1 = get_cycles();
2117    
2118     - for (i = 0; i < PERF_LOOPS; i++) {
2119     - for (j = 0; j < NODES; j++)
2120     + for (i = 0; i < perf_loops; i++) {
2121     + for (j = 0; j < nnodes; j++)
2122     insert(nodes + j, &root);
2123     - for (j = 0; j < NODES; j++)
2124     + for (j = 0; j < nnodes; j++)
2125     erase(nodes + j, &root);
2126     }
2127    
2128     time2 = get_cycles();
2129     time = time2 - time1;
2130    
2131     - time = div_u64(time, PERF_LOOPS);
2132     + time = div_u64(time, perf_loops);
2133     printk(" -> %llu cycles\n", (unsigned long long)time);
2134    
2135     - for (i = 0; i < CHECK_LOOPS; i++) {
2136     + for (i = 0; i < check_loops; i++) {
2137     init();
2138     - for (j = 0; j < NODES; j++) {
2139     + for (j = 0; j < nnodes; j++) {
2140     check(j);
2141     insert(nodes + j, &root);
2142     }
2143     - for (j = 0; j < NODES; j++) {
2144     - check(NODES - j);
2145     + for (j = 0; j < nnodes; j++) {
2146     + check(nnodes - j);
2147     erase(nodes + j, &root);
2148     }
2149     check(0);
2150     @@ -216,32 +227,34 @@ static int __init rbtree_test_init(void)
2151    
2152     time1 = get_cycles();
2153    
2154     - for (i = 0; i < PERF_LOOPS; i++) {
2155     - for (j = 0; j < NODES; j++)
2156     + for (i = 0; i < perf_loops; i++) {
2157     + for (j = 0; j < nnodes; j++)
2158     insert_augmented(nodes + j, &root);
2159     - for (j = 0; j < NODES; j++)
2160     + for (j = 0; j < nnodes; j++)
2161     erase_augmented(nodes + j, &root);
2162     }
2163    
2164     time2 = get_cycles();
2165     time = time2 - time1;
2166    
2167     - time = div_u64(time, PERF_LOOPS);
2168     + time = div_u64(time, perf_loops);
2169     printk(" -> %llu cycles\n", (unsigned long long)time);
2170    
2171     - for (i = 0; i < CHECK_LOOPS; i++) {
2172     + for (i = 0; i < check_loops; i++) {
2173     init();
2174     - for (j = 0; j < NODES; j++) {
2175     + for (j = 0; j < nnodes; j++) {
2176     check_augmented(j);
2177     insert_augmented(nodes + j, &root);
2178     }
2179     - for (j = 0; j < NODES; j++) {
2180     - check_augmented(NODES - j);
2181     + for (j = 0; j < nnodes; j++) {
2182     + check_augmented(nnodes - j);
2183     erase_augmented(nodes + j, &root);
2184     }
2185     check_augmented(0);
2186     }
2187    
2188     + kfree(nodes);
2189     +
2190     return -EAGAIN; /* Fail will directly unload the module */
2191     }
2192    
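
rbtree_test gets the same treatment: module parameters plus a runtime allocation. One nit worth flagging: kmalloc(nnodes * sizeof(*nodes), ...) multiplies a user-controlled count by an element size, and the overflow-checking idiom would be kcalloc()/kmalloc_array() in the kernel, or calloc() in user space, as sketched below; this is an observation, not part of the patch:

	#include <stdlib.h>

	struct test_node { unsigned int key, val; };

	/* calloc() rejects counts whose n * size product would overflow,
	 * unlike a bare malloc(n * size). */
	static struct test_node *alloc_nodes(size_t nnodes)
	{
		return calloc(nnodes, sizeof(struct test_node));
	}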
2193     diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
2194     index 39451c84c785..6e0aa296f134 100644
2195     --- a/net/mac80211/mlme.c
2196     +++ b/net/mac80211/mlme.c
2197     @@ -1867,7 +1867,8 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
2198     params[ac].acm = acm;
2199     params[ac].uapsd = uapsd;
2200    
2201     - if (params[ac].cw_min > params[ac].cw_max) {
2202     + if (params[ac].cw_min == 0 ||
2203     + params[ac].cw_min > params[ac].cw_max) {
2204     sdata_info(sdata,
2205     "AP has invalid WMM params (CWmin/max=%d/%d for ACI %d), using defaults\n",
2206     params[ac].cw_min, params[ac].cw_max, aci);
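
mac80211 now also treats CWmin == 0 as invalid, since a zero contention window from a broken AP defeats WMM backoff; such params are replaced with defaults. A standalone version of the check with example values:

	#include <stdbool.h>
	#include <stdio.h>

	static bool demo_wmm_cw_valid(unsigned int cw_min, unsigned int cw_max)
	{
		return cw_min != 0 && cw_min <= cw_max;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       demo_wmm_cw_valid(0, 1023),	/* rejected by this fix */
		       demo_wmm_cw_valid(15, 7),	/* min > max, already rejected */
		       demo_wmm_cw_valid(15, 1023));	/* sane values pass */
		return 0;
	}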
2207     diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
2208     index 685e6d225414..1a8df242d26a 100644
2209     --- a/net/sunrpc/xprt.c
2210     +++ b/net/sunrpc/xprt.c
2211     @@ -778,8 +778,15 @@ void xprt_connect(struct rpc_task *task)
2212     return;
2213     if (xprt_test_and_set_connecting(xprt))
2214     return;
2215     - xprt->stat.connect_start = jiffies;
2216     - xprt->ops->connect(xprt, task);
2217     + /* Race breaker */
2218     + if (!xprt_connected(xprt)) {
2219     + xprt->stat.connect_start = jiffies;
2220     + xprt->ops->connect(xprt, task);
2221     + } else {
2222     + xprt_clear_connecting(xprt);
2223     + task->tk_status = 0;
2224     + rpc_wake_up_queued_task(&xprt->pending, task);
2225     + }
2226     }
2227     xprt_release_write(xprt, task);
2228     }
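
The sunrpc hunk closes a window where a task wins xprt_test_and_set_connecting() after another task has already completed the connect, and would otherwise issue a redundant transport connect. A C11 sketch of the flag-then-recheck pattern with invented demo_* names:

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool demo_connecting;
	static atomic_bool demo_connected;

	static void demo_connect(void)
	{
		if (atomic_load(&demo_connected))
			return;
		if (atomic_exchange(&demo_connecting, true))
			return;			/* someone else owns the connect */

		if (!atomic_load(&demo_connected)) {
			/* ... kick off the actual transport connect here ... */
		} else {
			/* Race breaker: we got connected while racing for the flag. */
			atomic_store(&demo_connecting, false);
			/* the kernel also wakes waiters via rpc_wake_up_queued_task() */
		}
	}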