Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0237-4.9.138-all-fixes.patch



Revision 3316
Tue Mar 12 10:44:38 2019 UTC by niro
File size: 69284 bytes
-linux-4.9.138
1 niro 3316 diff --git a/Makefile b/Makefile
2     index 41fe3014b712..ccf2602f664d 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 137
9     +SUBLEVEL = 138
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/alpha/include/asm/termios.h b/arch/alpha/include/asm/termios.h
14     index 7fde0f88da88..51ed90be770a 100644
15     --- a/arch/alpha/include/asm/termios.h
16     +++ b/arch/alpha/include/asm/termios.h
17     @@ -72,9 +72,15 @@
18     })
19    
20     #define user_termios_to_kernel_termios(k, u) \
21     - copy_from_user(k, u, sizeof(struct termios))
22     + copy_from_user(k, u, sizeof(struct termios2))
23    
24     #define kernel_termios_to_user_termios(u, k) \
25     + copy_to_user(u, k, sizeof(struct termios2))
26     +
27     +#define user_termios_to_kernel_termios_1(k, u) \
28     + copy_from_user(k, u, sizeof(struct termios))
29     +
30     +#define kernel_termios_to_user_termios_1(u, k) \
31     copy_to_user(u, k, sizeof(struct termios))
32    
33     #endif /* _ALPHA_TERMIOS_H */
34     diff --git a/arch/alpha/include/uapi/asm/ioctls.h b/arch/alpha/include/uapi/asm/ioctls.h
35     index f30c94ae1bdb..7ee8ab577e11 100644
36     --- a/arch/alpha/include/uapi/asm/ioctls.h
37     +++ b/arch/alpha/include/uapi/asm/ioctls.h
38     @@ -31,6 +31,11 @@
39     #define TCXONC _IO('t', 30)
40     #define TCFLSH _IO('t', 31)
41    
42     +#define TCGETS2 _IOR('T', 42, struct termios2)
43     +#define TCSETS2 _IOW('T', 43, struct termios2)
44     +#define TCSETSW2 _IOW('T', 44, struct termios2)
45     +#define TCSETSF2 _IOW('T', 45, struct termios2)
46     +
47     #define TIOCSWINSZ _IOW('t', 103, struct winsize)
48     #define TIOCGWINSZ _IOR('t', 104, struct winsize)
49     #define TIOCSTART _IO('t', 110) /* start output, like ^Q */
50     diff --git a/arch/alpha/include/uapi/asm/termbits.h b/arch/alpha/include/uapi/asm/termbits.h
51     index 879dd3589921..483c7ec2a879 100644
52     --- a/arch/alpha/include/uapi/asm/termbits.h
53     +++ b/arch/alpha/include/uapi/asm/termbits.h
54     @@ -25,6 +25,19 @@ struct termios {
55     speed_t c_ospeed; /* output speed */
56     };
57    
58     +/* Alpha has identical termios and termios2 */
59     +
60     +struct termios2 {
61     + tcflag_t c_iflag; /* input mode flags */
62     + tcflag_t c_oflag; /* output mode flags */
63     + tcflag_t c_cflag; /* control mode flags */
64     + tcflag_t c_lflag; /* local mode flags */
65     + cc_t c_cc[NCCS]; /* control characters */
66     + cc_t c_line; /* line discipline (== c_cc[19]) */
67     + speed_t c_ispeed; /* input speed */
68     + speed_t c_ospeed; /* output speed */
69     +};
70     +
71     /* Alpha has matching termios and ktermios */
72    
73     struct ktermios {
74     @@ -147,6 +160,7 @@ struct ktermios {
75     #define B3000000 00034
76     #define B3500000 00035
77     #define B4000000 00036
78     +#define BOTHER 00037
79    
80     #define CSIZE 00001400
81     #define CS5 00000000
82     @@ -164,6 +178,9 @@ struct ktermios {
83     #define CMSPAR 010000000000 /* mark or space (stick) parity */
84     #define CRTSCTS 020000000000 /* flow control */
85    
86     +#define CIBAUD 07600000
87     +#define IBSHIFT 16
88     +
89     /* c_lflag bits */
90     #define ISIG 0x00000080
91     #define ICANON 0x00000100
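
[Editor's note] The three Alpha hunks above give the architecture the termios2/BOTHER interface other ports have had for years, which is what lets userspace ask for arbitrary, non-table baud rates. Below is a minimal userspace sketch of how the new ioctls are meant to be used; the function name and the example rate are made up, only TCGETS2/TCSETS2, struct termios2, CBAUD and BOTHER come from the headers patched above.

#include <sys/ioctl.h>
#include <asm/ioctls.h>   /* TCGETS2, TCSETS2 (added above for Alpha) */
#include <asm/termbits.h> /* struct termios2, CBAUD, BOTHER */

/* Set an arbitrary line speed on an already-open serial fd. */
int set_custom_baud(int fd, unsigned int baud)
{
        struct termios2 tio;

        if (ioctl(fd, TCGETS2, &tio) < 0)
                return -1;

        tio.c_cflag &= ~CBAUD;  /* drop any legacy Bxxxx code */
        tio.c_cflag |= BOTHER;  /* "take the rate from c_ispeed/c_ospeed" */
        tio.c_ispeed = baud;
        tio.c_ospeed = baud;    /* e.g. 1500000 for 1.5 Mbaud */

        return ioctl(fd, TCSETS2, &tio);
}

On the kernel side this works because user_termios_to_kernel_termios() now copies a full struct termios2, so the extra c_ispeed/c_ospeed words actually reach the tty layer.
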
92     diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
93     index 8ec4dbbb50b0..47c3fb8d4313 100644
94     --- a/arch/arm/configs/imx_v6_v7_defconfig
95     +++ b/arch/arm/configs/imx_v6_v7_defconfig
96     @@ -361,6 +361,7 @@ CONFIG_ZISOFS=y
97     CONFIG_UDF_FS=m
98     CONFIG_MSDOS_FS=m
99     CONFIG_VFAT_FS=y
100     +CONFIG_TMPFS_POSIX_ACL=y
101     CONFIG_JFFS2_FS=y
102     CONFIG_UBIFS_FS=y
103     CONFIG_NFS_FS=y
104     diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
105     index 20436972537f..a670c70f4def 100644
106     --- a/arch/arm/kvm/arm.c
107     +++ b/arch/arm/kvm/arm.c
108     @@ -1092,8 +1092,6 @@ static void cpu_init_hyp_mode(void *dummy)
109    
110     __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
111     __cpu_init_stage2();
112     -
113     - kvm_arm_init_debug();
114     }
115    
116     static void cpu_hyp_reinit(void)
117     @@ -1108,6 +1106,8 @@ static void cpu_hyp_reinit(void)
118     if (__hyp_get_vectors() == hyp_default_vectors)
119     cpu_init_hyp_mode(NULL);
120     }
121     +
122     + kvm_arm_init_debug();
123     }
124    
125     static void cpu_hyp_reset(void)
126     diff --git a/arch/mips/include/asm/mach-loongson64/irq.h b/arch/mips/include/asm/mach-loongson64/irq.h
127     index d18c45c7c394..19ff9ce46c02 100644
128     --- a/arch/mips/include/asm/mach-loongson64/irq.h
129     +++ b/arch/mips/include/asm/mach-loongson64/irq.h
130     @@ -9,7 +9,7 @@
131     #define MIPS_CPU_IRQ_BASE 56
132    
133     #define LOONGSON_UART_IRQ (MIPS_CPU_IRQ_BASE + 2) /* UART */
134     -#define LOONGSON_HT1_IRQ (MIPS_CPU_IRQ_BASE + 3) /* HT1 */
135     +#define LOONGSON_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 3) /* CASCADE */
136     #define LOONGSON_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) /* CPU Timer */
137    
138     #define LOONGSON_HT1_CFG_BASE loongson_sysconf.ht_control_base
139     diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
140     index 1723b1762297..e757f36cea6f 100644
141     --- a/arch/mips/kernel/crash.c
142     +++ b/arch/mips/kernel/crash.c
143     @@ -34,6 +34,9 @@ static void crash_shutdown_secondary(void *passed_regs)
144     if (!cpu_online(cpu))
145     return;
146    
147     + /* We won't be sent IPIs any more. */
148     + set_cpu_online(cpu, false);
149     +
150     local_irq_disable();
151     if (!cpumask_test_cpu(cpu, &cpus_in_crash))
152     crash_save_cpu(regs, cpu);
153     diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
154     index 59725204105c..32b567e88b02 100644
155     --- a/arch/mips/kernel/machine_kexec.c
156     +++ b/arch/mips/kernel/machine_kexec.c
157     @@ -96,6 +96,9 @@ machine_kexec(struct kimage *image)
158     *ptr = (unsigned long) phys_to_virt(*ptr);
159     }
160    
161     + /* Mark offline BEFORE disabling local irq. */
162     + set_cpu_online(smp_processor_id(), false);
163     +
164     /*
165     * we do not want to be bothered.
166     */
167     diff --git a/arch/mips/loongson64/loongson-3/irq.c b/arch/mips/loongson64/loongson-3/irq.c
168     index 8e7649088353..027f53e3bc81 100644
169     --- a/arch/mips/loongson64/loongson-3/irq.c
170     +++ b/arch/mips/loongson64/loongson-3/irq.c
171     @@ -44,51 +44,8 @@ void mach_irq_dispatch(unsigned int pending)
172     }
173     }
174    
175     -static struct irqaction cascade_irqaction = {
176     - .handler = no_action,
177     - .flags = IRQF_NO_SUSPEND,
178     - .name = "cascade",
179     -};
180     -
181     -static inline void mask_loongson_irq(struct irq_data *d)
182     -{
183     - clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
184     - irq_disable_hazard();
185     -
186     - /* Workaround: UART IRQ may deliver to any core */
187     - if (d->irq == LOONGSON_UART_IRQ) {
188     - int cpu = smp_processor_id();
189     - int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node;
190     - int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node;
191     - u64 intenclr_addr = smp_group[node_id] |
192     - (u64)(&LOONGSON_INT_ROUTER_INTENCLR);
193     - u64 introuter_lpc_addr = smp_group[node_id] |
194     - (u64)(&LOONGSON_INT_ROUTER_LPC);
195     -
196     - *(volatile u32 *)intenclr_addr = 1 << 10;
197     - *(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id);
198     - }
199     -}
200     -
201     -static inline void unmask_loongson_irq(struct irq_data *d)
202     -{
203     - /* Workaround: UART IRQ may deliver to any core */
204     - if (d->irq == LOONGSON_UART_IRQ) {
205     - int cpu = smp_processor_id();
206     - int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node;
207     - int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node;
208     - u64 intenset_addr = smp_group[node_id] |
209     - (u64)(&LOONGSON_INT_ROUTER_INTENSET);
210     - u64 introuter_lpc_addr = smp_group[node_id] |
211     - (u64)(&LOONGSON_INT_ROUTER_LPC);
212     -
213     - *(volatile u32 *)intenset_addr = 1 << 10;
214     - *(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id);
215     - }
216     -
217     - set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
218     - irq_enable_hazard();
219     -}
220     +static inline void mask_loongson_irq(struct irq_data *d) { }
221     +static inline void unmask_loongson_irq(struct irq_data *d) { }
222    
223     /* For MIPS IRQs which shared by all cores */
224     static struct irq_chip loongson_irq_chip = {
225     @@ -126,12 +83,11 @@ void __init mach_init_irq(void)
226     mips_cpu_irq_init();
227     init_i8259_irqs();
228     irq_set_chip_and_handler(LOONGSON_UART_IRQ,
229     - &loongson_irq_chip, handle_level_irq);
230     -
231     - /* setup HT1 irq */
232     - setup_irq(LOONGSON_HT1_IRQ, &cascade_irqaction);
233     + &loongson_irq_chip, handle_percpu_irq);
234     + irq_set_chip_and_handler(LOONGSON_BRIDGE_IRQ,
235     + &loongson_irq_chip, handle_percpu_irq);
236    
237     - set_c0_status(STATUSF_IP2 | STATUSF_IP6);
238     + set_c0_status(STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP6);
239     }
240    
241     #ifdef CONFIG_HOTPLUG_CPU
242     diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
243     index 014649be158d..2d6886f09ba3 100644
244     --- a/arch/mips/pci/pci-legacy.c
245     +++ b/arch/mips/pci/pci-legacy.c
246     @@ -116,8 +116,12 @@ static void pcibios_scanbus(struct pci_controller *hose)
247     if (pci_has_flag(PCI_PROBE_ONLY)) {
248     pci_bus_claim_resources(bus);
249     } else {
250     + struct pci_bus *child;
251     +
252     pci_bus_size_bridges(bus);
253     pci_bus_assign_resources(bus);
254     + list_for_each_entry(child, &bus->children, node)
255     + pcie_bus_configure_settings(child);
256     }
257     pci_bus_add_devices(bus);
258     }
259     diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
260     index 0fbd0a0e1cda..e88f4e7f39f3 100644
261     --- a/arch/parisc/kernel/hpmc.S
262     +++ b/arch/parisc/kernel/hpmc.S
263     @@ -83,7 +83,8 @@ END(hpmc_pim_data)
264     .text
265    
266     .import intr_save, code
267     -ENTRY_CFI(os_hpmc)
268     + .align 16
269     +ENTRY(os_hpmc)
270     .os_hpmc:
271    
272     /*
273     @@ -299,11 +300,14 @@ os_hpmc_6:
274    
275     b .
276     nop
277     -ENDPROC_CFI(os_hpmc)
278     + .align 16 /* make function length multiple of 16 bytes */
279     .os_hpmc_end:
280    
281    
282     __INITRODATA
283     - .export os_hpmc_size
284     +.globl os_hpmc_size
285     + .align 4
286     + .type os_hpmc_size, @object
287     + .size os_hpmc_size, 4
288     os_hpmc_size:
289     .word .os_hpmc_end-.os_hpmc
290     diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
291     index 12866ccb5694..5c2199857aa8 100644
292     --- a/arch/powerpc/boot/crt0.S
293     +++ b/arch/powerpc/boot/crt0.S
294     @@ -47,8 +47,10 @@ p_end: .long _end
295     p_pstack: .long _platform_stack_top
296     #endif
297    
298     - .weak _zimage_start
299     .globl _zimage_start
300     + /* Clang appears to require the .weak directive to be after the symbol
301     + * is defined. See https://bugs.llvm.org/show_bug.cgi?id=38921 */
302     + .weak _zimage_start
303     _zimage_start:
304     .globl _zimage_start_lib
305     _zimage_start_lib:
306     diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
307     index e5bfbf62827a..8336b9016ca9 100644
308     --- a/arch/powerpc/kernel/eeh.c
309     +++ b/arch/powerpc/kernel/eeh.c
310     @@ -169,6 +169,11 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
311     int n = 0, l = 0;
312     char buffer[128];
313    
314     + if (!pdn) {
315     + pr_warn("EEH: Note: No error log for absent device.\n");
316     + return 0;
317     + }
318     +
319     n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n",
320     edev->phb->global_number, pdn->busno,
321     PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
322     diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
323     index 0b50019505a5..79cf21be8f6e 100644
324     --- a/arch/powerpc/mm/tlb_nohash.c
325     +++ b/arch/powerpc/mm/tlb_nohash.c
326     @@ -481,6 +481,9 @@ static void setup_page_sizes(void)
327     for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
328     struct mmu_psize_def *def = &mmu_psize_defs[psize];
329    
330     + if (!def->shift)
331     + continue;
332     +
333     if (tlb1ps & (1U << (def->shift - 10))) {
334     def->flags |= MMU_PAGE_SIZE_DIRECT;
335    
336     diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
337     index ca20a892021b..6c6877d628ef 100644
338     --- a/arch/xtensa/boot/Makefile
339     +++ b/arch/xtensa/boot/Makefile
340     @@ -31,7 +31,7 @@ $(bootdir-y): $(addprefix $(obj)/,$(subdir-y)) \
341     $(addprefix $(obj)/,$(host-progs))
342     $(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
343    
344     -OBJCOPYFLAGS = --strip-all -R .comment -R .note.gnu.build-id -O binary
345     +OBJCOPYFLAGS = --strip-all -R .comment -R .notes -O binary
346    
347     vmlinux.bin: vmlinux FORCE
348     $(call if_changed,objcopy)
349     diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
350     index b42d68bfe3cf..521c1e789e6e 100644
351     --- a/arch/xtensa/include/asm/processor.h
352     +++ b/arch/xtensa/include/asm/processor.h
353     @@ -24,7 +24,11 @@
354     # error Linux requires the Xtensa Windowed Registers Option.
355     #endif
356    
357     -#define ARCH_SLAB_MINALIGN XCHAL_DATA_WIDTH
358     +/* Xtensa ABI requires stack alignment to be at least 16 */
359     +
360     +#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16)
361     +
362     +#define ARCH_SLAB_MINALIGN STACK_ALIGN
363    
364     /*
365     * User space process size: 1 GB.
366     diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
367     index 23ce62e60435..27c8e07ace43 100644
368     --- a/arch/xtensa/kernel/head.S
369     +++ b/arch/xtensa/kernel/head.S
370     @@ -88,9 +88,12 @@ _SetupMMU:
371     initialize_mmu
372     #if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
373     rsr a2, excsave1
374     - movi a3, 0x08000000
375     + movi a3, XCHAL_KSEG_PADDR
376     + bltu a2, a3, 1f
377     + sub a2, a2, a3
378     + movi a3, XCHAL_KSEG_SIZE
379     bgeu a2, a3, 1f
380     - movi a3, 0xd0000000
381     + movi a3, XCHAL_KSEG_CACHED_VADDR
382     add a2, a2, a3
383     wsr a2, excsave1
384     1:
385     diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
386     index 31411fc82662..e8358ea0a9f9 100644
387     --- a/arch/xtensa/kernel/vmlinux.lds.S
388     +++ b/arch/xtensa/kernel/vmlinux.lds.S
389     @@ -109,6 +109,7 @@ SECTIONS
390     .fixup : { *(.fixup) }
391    
392     EXCEPTION_TABLE(16)
393     + NOTES
394     /* Data section */
395    
396     _sdata = .;
397     diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
398     index 987e8f503522..ff4280800cd0 100644
399     --- a/drivers/cdrom/cdrom.c
400     +++ b/drivers/cdrom/cdrom.c
401     @@ -2435,7 +2435,7 @@ static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi,
402     return -ENOSYS;
403    
404     if (arg != CDSL_CURRENT && arg != CDSL_NONE) {
405     - if ((int)arg >= cdi->capacity)
406     + if (arg >= cdi->capacity)
407     return -EINVAL;
408     }
409    
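
[Editor's note] The one-character change above matters because arg is an unsigned long while cdi->capacity is a plain int: with the old cast, a huge slot number wrapped to a negative int and slipped past the bound check. A tiny standalone illustration with made-up values (not driver code):

#include <stdio.h>

int main(void)
{
        unsigned long arg = 0xfffffffffffffff0UL; /* bogus CDROM_SELECT_DISC slot */
        int capacity = 4;                         /* stands in for cdi->capacity */

        /* old test: (int)arg is -16 on the usual two's-complement ABI,
         * so the range check is bypassed and a negative index escapes */
        printf("old check rejects: %d\n", (int)arg >= capacity);

        /* new test: capacity is promoted to unsigned long, so any
         * out-of-range slot is caught and -EINVAL is returned */
        printf("new check rejects: %d\n", arg >= (unsigned long)capacity);
        return 0;
}
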
410     diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
411     index 2bb2551c6245..a17a428fa706 100644
412     --- a/drivers/clk/at91/clk-pll.c
413     +++ b/drivers/clk/at91/clk-pll.c
414     @@ -133,6 +133,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
415     {
416     struct clk_pll *pll = to_clk_pll(hw);
417    
418     + if (!pll->div || !pll->mul)
419     + return 0;
420     +
421     return (parent_rate / pll->div) * (pll->mul + 1);
422     }
423    
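
[Editor's note] For context on the guard added above: clk_pll_recalc_rate() scales the parent by (mul + 1) / div, so a PLL whose DIV/MUL fields still read zero (e.g. never programmed by the bootloader) would otherwise divide by zero. A worked example with hypothetical register values:

#include <stdio.h>

int main(void)
{
        unsigned long parent_rate = 24000000UL; /* 24 MHz crystal (example) */
        unsigned int div = 2, mul = 99;         /* as read back from the PLL */

        if (!div || !mul)
                puts("PLL unconfigured, report 0 Hz");    /* new early return */
        else
                printf("PLL rate = %lu Hz\n",
                       (parent_rate / div) * (mul + 1));  /* 1200000000 Hz */
        return 0;
}
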
424     diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
425     index fbaa84a33c46..14071a57c926 100644
426     --- a/drivers/clk/clk-s2mps11.c
427     +++ b/drivers/clk/clk-s2mps11.c
428     @@ -245,6 +245,36 @@ static const struct platform_device_id s2mps11_clk_id[] = {
429     };
430     MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
431    
432     +#ifdef CONFIG_OF
433     +/*
434     + * Device is instantiated through parent MFD device and device matching is done
435     + * through platform_device_id.
436     + *
437     + * However if device's DT node contains proper clock compatible and driver is
438     + * built as a module, then the *module* matching will be done trough DT aliases.
439     + * This requires of_device_id table. In the same time this will not change the
440     + * actual *device* matching so do not add .of_match_table.
441     + */
442     +static const struct of_device_id s2mps11_dt_match[] = {
443     + {
444     + .compatible = "samsung,s2mps11-clk",
445     + .data = (void *)S2MPS11X,
446     + }, {
447     + .compatible = "samsung,s2mps13-clk",
448     + .data = (void *)S2MPS13X,
449     + }, {
450     + .compatible = "samsung,s2mps14-clk",
451     + .data = (void *)S2MPS14X,
452     + }, {
453     + .compatible = "samsung,s5m8767-clk",
454     + .data = (void *)S5M8767X,
455     + }, {
456     + /* Sentinel */
457     + },
458     +};
459     +MODULE_DEVICE_TABLE(of, s2mps11_dt_match);
460     +#endif
461     +
462     static struct platform_driver s2mps11_clk_driver = {
463     .driver = {
464     .name = "s2mps11-clk",
465     diff --git a/drivers/clk/hisilicon/reset.c b/drivers/clk/hisilicon/reset.c
466     index 2a5015c736ce..43e82fa64422 100644
467     --- a/drivers/clk/hisilicon/reset.c
468     +++ b/drivers/clk/hisilicon/reset.c
469     @@ -109,9 +109,8 @@ struct hisi_reset_controller *hisi_reset_init(struct platform_device *pdev)
470     return NULL;
471    
472     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
473     - rstc->membase = devm_ioremap(&pdev->dev,
474     - res->start, resource_size(res));
475     - if (!rstc->membase)
476     + rstc->membase = devm_ioremap_resource(&pdev->dev, res);
477     + if (IS_ERR(rstc->membase))
478     return NULL;
479    
480     spin_lock_init(&rstc->lock);
481     diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c
482     index e8075359366b..ebce5260068b 100644
483     --- a/drivers/clk/rockchip/clk-ddr.c
484     +++ b/drivers/clk/rockchip/clk-ddr.c
485     @@ -80,16 +80,12 @@ static long rockchip_ddrclk_sip_round_rate(struct clk_hw *hw,
486     static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw)
487     {
488     struct rockchip_ddrclk *ddrclk = to_rockchip_ddrclk_hw(hw);
489     - int num_parents = clk_hw_get_num_parents(hw);
490     u32 val;
491    
492     val = clk_readl(ddrclk->reg_base +
493     ddrclk->mux_offset) >> ddrclk->mux_shift;
494     val &= GENMASK(ddrclk->mux_width - 1, 0);
495    
496     - if (val >= num_parents)
497     - return -EINVAL;
498     -
499     return val;
500     }
501    
502     diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
503     index 0efd36e483ab..a2b545fdee81 100644
504     --- a/drivers/clocksource/i8253.c
505     +++ b/drivers/clocksource/i8253.c
506     @@ -19,6 +19,13 @@
507     DEFINE_RAW_SPINLOCK(i8253_lock);
508     EXPORT_SYMBOL(i8253_lock);
509    
510     +/*
511     + * Handle PIT quirk in pit_shutdown() where zeroing the counter register
512     + * restarts the PIT, negating the shutdown. On platforms with the quirk,
513     + * platform specific code can set this to false.
514     + */
515     +bool i8253_clear_counter_on_shutdown __ro_after_init = true;
516     +
517     #ifdef CONFIG_CLKSRC_I8253
518     /*
519     * Since the PIT overflows every tick, its not very useful
520     @@ -108,8 +115,11 @@ static int pit_shutdown(struct clock_event_device *evt)
521     raw_spin_lock(&i8253_lock);
522    
523     outb_p(0x30, PIT_MODE);
524     - outb_p(0, PIT_CH0);
525     - outb_p(0, PIT_CH0);
526     +
527     + if (i8253_clear_counter_on_shutdown) {
528     + outb_p(0, PIT_CH0);
529     + outb_p(0, PIT_CH0);
530     + }
531    
532     raw_spin_unlock(&i8253_lock);
533     return 0;
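
[Editor's note] The new i8253_clear_counter_on_shutdown switch above defaults to the historical behaviour; platforms whose PIT restarts when the counter is zeroed are expected to clear it early in boot. A hedged sketch of such an opt-out follows; the hook name is made up, and it assumes the flag is exported via <linux/i8253.h> in the part of this patch not shown here.

#include <linux/i8253.h>
#include <linux/init.h>

static void __init example_board_pit_quirk(void)
{
        /* On this hypothetical board, writing 0/0 to PIT_CH0 after the
         * mode-0 command restarts the timer, defeating pit_shutdown();
         * skip the counter clear instead. */
        i8253_clear_counter_on_shutdown = false;
}
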
534     diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
535     index db7890cb254e..b59441d109a5 100644
536     --- a/drivers/gpu/drm/drm_dp_mst_topology.c
537     +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
538     @@ -1230,6 +1230,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
539     mutex_lock(&mgr->lock);
540     mstb = mgr->mst_primary;
541    
542     + if (!mstb)
543     + goto out;
544     +
545     for (i = 0; i < lct - 1; i++) {
546     int shift = (i % 2) ? 0 : 4;
547     int port_num = (rad[i / 2] >> shift) & 0xf;
548     diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
549     index 6c70a5bfd7d8..840522867436 100644
550     --- a/drivers/gpu/drm/i915/intel_audio.c
551     +++ b/drivers/gpu/drm/i915/intel_audio.c
552     @@ -76,6 +76,9 @@ static const struct {
553     /* HDMI N/CTS table */
554     #define TMDS_297M 297000
555     #define TMDS_296M 296703
556     +#define TMDS_594M 594000
557     +#define TMDS_593M 593407
558     +
559     static const struct {
560     int sample_rate;
561     int clock;
562     @@ -96,6 +99,20 @@ static const struct {
563     { 176400, TMDS_297M, 18816, 247500 },
564     { 192000, TMDS_296M, 23296, 281250 },
565     { 192000, TMDS_297M, 20480, 247500 },
566     + { 44100, TMDS_593M, 8918, 937500 },
567     + { 44100, TMDS_594M, 9408, 990000 },
568     + { 48000, TMDS_593M, 5824, 562500 },
569     + { 48000, TMDS_594M, 6144, 594000 },
570     + { 32000, TMDS_593M, 5824, 843750 },
571     + { 32000, TMDS_594M, 3072, 445500 },
572     + { 88200, TMDS_593M, 17836, 937500 },
573     + { 88200, TMDS_594M, 18816, 990000 },
574     + { 96000, TMDS_593M, 11648, 562500 },
575     + { 96000, TMDS_594M, 12288, 594000 },
576     + { 176400, TMDS_593M, 35672, 937500 },
577     + { 176400, TMDS_594M, 37632, 990000 },
578     + { 192000, TMDS_593M, 23296, 562500 },
579     + { 192000, TMDS_594M, 24576, 594000 },
580     };
581    
582     /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
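
[Editor's note] The rows added above extend the HDMI N/CTS lookup to the 594 MHz and 593.407 MHz TMDS clocks used by 4K modes. They follow the usual HDMI audio clock regeneration relation f_audio = f_TMDS * N / (128 * CTS); a quick standalone check of the new 48 kHz / 594 MHz entry (values copied from the table, the program itself is only an illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t tmds_hz = 594000000ULL; /* TMDS_594M (the table stores kHz) */
        uint64_t n = 6144, cts = 594000; /* { 48000, TMDS_594M, 6144, 594000 } */

        /* f_audio = f_TMDS * N / (128 * CTS) -> expect 48000 Hz */
        printf("recovered sample rate: %llu Hz\n",
               (unsigned long long)(tmds_hz * n / (128 * cts)));
        return 0;
}
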
583     diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
584     index 67db1577ee49..fd11be6b23b9 100644
585     --- a/drivers/gpu/drm/i915/intel_lrc.c
586     +++ b/drivers/gpu/drm/i915/intel_lrc.c
587     @@ -368,7 +368,8 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
588    
589     reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
590    
591     - /* True 32b PPGTT with dynamic page allocation: update PDP
592     + /*
593     + * True 32b PPGTT with dynamic page allocation: update PDP
594     * registers and point the unallocated PDPs to scratch page.
595     * PML4 is allocated during ppgtt init, so this is not needed
596     * in 48-bit mode.
597     @@ -376,6 +377,17 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
598     if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
599     execlists_update_context_pdps(ppgtt, reg_state);
600    
601     + /*
602     + * Make sure the context image is complete before we submit it to HW.
603     + *
604     + * Ostensibly, writes (including the WCB) should be flushed prior to
605     + * an uncached write such as our mmio register access, the empirical
606     + * evidence (esp. on Braswell) suggests that the WC write into memory
607     + * may not be visible to the HW prior to the completion of the UC
608     + * register write and that we may begin execution from the context
609     + * before its image is complete leading to invalid PD chasing.
610     + */
611     + wmb();
612     return ce->lrc_desc;
613     }
614    
615     diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
616     index 7def04049498..6a0b25e0823f 100644
617     --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
618     +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
619     @@ -273,6 +273,17 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
620     }
621    
622     txn->last_pat->next_pa = 0;
623     + /* ensure that the written descriptors are visible to DMM */
624     + wmb();
625     +
626     + /*
627     + * NOTE: the wmb() above should be enough, but there seems to be a bug
628     + * in OMAP's memory barrier implementation, which in some rare cases may
629     + * cause the writes not to be observable after wmb().
630     + */
631     +
632     + /* read back to ensure the data is in RAM */
633     + readl(&txn->last_pat->next_pa);
634    
635     /* write to PAT_DESCR to clear out any pending transaction */
636     dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);
637     diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
638     index 8c8cbe837e61..f2033ab36f37 100644
639     --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
640     +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
641     @@ -478,6 +478,11 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
642     return 0;
643     }
644    
645     +static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
646     +{
647     + rockchip_drm_platform_remove(pdev);
648     +}
649     +
650     static const struct of_device_id rockchip_drm_dt_ids[] = {
651     { .compatible = "rockchip,display-subsystem", },
652     { /* sentinel */ },
653     @@ -487,6 +492,7 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
654     static struct platform_driver rockchip_drm_platform_driver = {
655     .probe = rockchip_drm_platform_probe,
656     .remove = rockchip_drm_platform_remove,
657     + .shutdown = rockchip_drm_platform_shutdown,
658     .driver = {
659     .name = "rockchip-drm",
660     .of_match_table = rockchip_drm_dt_ids,
661     diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
662     index 5d9c2b03d83a..e83f8111a5fb 100644
663     --- a/drivers/media/i2c/tvp5150.c
664     +++ b/drivers/media/i2c/tvp5150.c
665     @@ -897,9 +897,6 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd,
666    
667     /* tvp5150 has some special limits */
668     rect.left = clamp(rect.left, 0, TVP5150_MAX_CROP_LEFT);
669     - rect.width = clamp_t(unsigned int, rect.width,
670     - TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left,
671     - TVP5150_H_MAX - rect.left);
672     rect.top = clamp(rect.top, 0, TVP5150_MAX_CROP_TOP);
673    
674     /* Calculate height based on current standard */
675     @@ -913,9 +910,16 @@ static int tvp5150_set_selection(struct v4l2_subdev *sd,
676     else
677     hmax = TVP5150_V_MAX_OTHERS;
678    
679     - rect.height = clamp_t(unsigned int, rect.height,
680     + /*
681     + * alignments:
682     + * - width = 2 due to UYVY colorspace
683     + * - height, image = no special alignment
684     + */
685     + v4l_bound_align_image(&rect.width,
686     + TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left,
687     + TVP5150_H_MAX - rect.left, 1, &rect.height,
688     hmax - TVP5150_MAX_CROP_TOP - rect.top,
689     - hmax - rect.top);
690     + hmax - rect.top, 0, 0);
691    
692     tvp5150_write(sd, TVP5150_VERT_BLANKING_START, rect.top);
693     tvp5150_write(sd, TVP5150_VERT_BLANKING_STOP,
694     diff --git a/drivers/media/pci/cx23885/altera-ci.c b/drivers/media/pci/cx23885/altera-ci.c
695     index aaf4e46ff3e9..a0c1ff97f905 100644
696     --- a/drivers/media/pci/cx23885/altera-ci.c
697     +++ b/drivers/media/pci/cx23885/altera-ci.c
698     @@ -660,6 +660,10 @@ static int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr)
699     }
700    
701     temp_int = append_internal(inter);
702     + if (!temp_int) {
703     + ret = -ENOMEM;
704     + goto err;
705     + }
706     inter->filts_used = 1;
707     inter->dev = config->dev;
708     inter->fpga_rw = config->fpga_rw;
709     @@ -694,6 +698,7 @@ err:
710     __func__, ret);
711    
712     kfree(pid_filt);
713     + kfree(inter);
714    
715     return ret;
716     }
717     @@ -728,6 +733,10 @@ int altera_ci_init(struct altera_ci_config *config, int ci_nr)
718     }
719    
720     temp_int = append_internal(inter);
721     + if (!temp_int) {
722     + ret = -ENOMEM;
723     + goto err;
724     + }
725     inter->cis_used = 1;
726     inter->dev = config->dev;
727     inter->fpga_rw = config->fpga_rw;
728     @@ -796,6 +805,7 @@ err:
729     ci_dbg_print("%s: Cannot initialize CI: Error %d.\n", __func__, ret);
730    
731     kfree(state);
732     + kfree(inter);
733    
734     return ret;
735     }
736     diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
737     index 58329d2dacd1..75c816a5dded 100644
738     --- a/drivers/mtd/devices/Kconfig
739     +++ b/drivers/mtd/devices/Kconfig
740     @@ -196,7 +196,7 @@ comment "Disk-On-Chip Device Drivers"
741     config MTD_DOCG3
742     tristate "M-Systems Disk-On-Chip G3"
743     select BCH
744     - select BCH_CONST_PARAMS
745     + select BCH_CONST_PARAMS if !MTD_NAND_BCH
746     select BITREVERSE
747     ---help---
748     This provides an MTD device driver for the M-Systems DiskOnChip
749     diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
750     index 31f61a744d66..9473d12ce239 100644
751     --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
752     +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
753     @@ -541,8 +541,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
754     for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
755     BUG_ON(!(strlen(bnad_net_stats_strings[i]) <
756     ETH_GSTRING_LEN));
757     - memcpy(string, bnad_net_stats_strings[i],
758     - ETH_GSTRING_LEN);
759     + strncpy(string, bnad_net_stats_strings[i],
760     + ETH_GSTRING_LEN);
761     string += ETH_GSTRING_LEN;
762     }
763     bmap = bna_tx_rid_mask(&bnad->bna);
764     diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
765     index e84574b1eae7..2a81f6d72140 100644
766     --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
767     +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
768     @@ -1826,11 +1826,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
769     {
770     struct e1000_adapter *adapter = netdev_priv(netdev);
771     int i;
772     - char *p = NULL;
773     const struct e1000_stats *stat = e1000_gstrings_stats;
774    
775     e1000_update_stats(adapter);
776     - for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
777     + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) {
778     + char *p;
779     +
780     switch (stat->type) {
781     case NETDEV_STATS:
782     p = (char *)netdev + stat->stat_offset;
783     @@ -1841,15 +1842,13 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
784     default:
785     WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
786     stat->type, i);
787     - break;
788     + continue;
789     }
790    
791     if (stat->sizeof_stat == sizeof(u64))
792     data[i] = *(u64 *)p;
793     else
794     data[i] = *(u32 *)p;
795     -
796     - stat++;
797     }
798     /* BUG_ON(i != E1000_STATS_LEN); */
799     }
800     diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
801     index dd112aa5cebb..39a09e18c1b7 100644
802     --- a/drivers/net/ethernet/intel/e1000/e1000_main.c
803     +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
804     @@ -521,8 +521,6 @@ void e1000_down(struct e1000_adapter *adapter)
805     struct net_device *netdev = adapter->netdev;
806     u32 rctl, tctl;
807    
808     - netif_carrier_off(netdev);
809     -
810     /* disable receives in the hardware */
811     rctl = er32(RCTL);
812     ew32(RCTL, rctl & ~E1000_RCTL_EN);
813     @@ -538,6 +536,15 @@ void e1000_down(struct e1000_adapter *adapter)
814     E1000_WRITE_FLUSH();
815     msleep(10);
816    
817     + /* Set the carrier off after transmits have been disabled in the
818     + * hardware, to avoid race conditions with e1000_watchdog() (which
819     + * may be running concurrently to us, checking for the carrier
820     + * bit to decide whether it should enable transmits again). Such
821     + * a race condition would result into transmission being disabled
822     + * in the hardware until the next IFF_DOWN+IFF_UP cycle.
823     + */
824     + netif_carrier_off(netdev);
825     +
826     napi_disable(&adapter->napi);
827    
828     e1000_irq_disable(adapter);
829     diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
830     index 2db1f7a04baf..0b29ee9ee8c3 100644
831     --- a/drivers/of/of_numa.c
832     +++ b/drivers/of/of_numa.c
833     @@ -126,9 +126,14 @@ static int __init of_numa_parse_distance_map_v1(struct device_node *map)
834     distance = of_read_number(matrix, 1);
835     matrix++;
836    
837     + if ((nodea == nodeb && distance != LOCAL_DISTANCE) ||
838     + (nodea != nodeb && distance <= LOCAL_DISTANCE)) {
839     + pr_err("Invalid distance[node%d -> node%d] = %d\n",
840     + nodea, nodeb, distance);
841     + return -EINVAL;
842     + }
843     +
844     numa_set_distance(nodea, nodeb, distance);
845     - pr_debug("distance[node%d -> node%d] = %d\n",
846     - nodea, nodeb, distance);
847    
848     /* Set default distance of node B->A same as A->B */
849     if (nodeb > nodea)
850     diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
851     index e79f2a181ad2..b9ec4a16db1f 100644
852     --- a/drivers/rtc/hctosys.c
853     +++ b/drivers/rtc/hctosys.c
854     @@ -50,8 +50,10 @@ static int __init rtc_hctosys(void)
855     tv64.tv_sec = rtc_tm_to_time64(&tm);
856    
857     #if BITS_PER_LONG == 32
858     - if (tv64.tv_sec > INT_MAX)
859     + if (tv64.tv_sec > INT_MAX) {
860     + err = -ERANGE;
861     goto err_read;
862     + }
863     #endif
864    
865     err = do_settimeofday64(&tv64);
866     diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
867     index b6d9e3104b89..2e3a70a6b300 100644
868     --- a/drivers/scsi/qla2xxx/qla_init.c
869     +++ b/drivers/scsi/qla2xxx/qla_init.c
870     @@ -4894,7 +4894,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
871     * The next call disables the board
872     * completely.
873     */
874     - ha->isp_ops->reset_adapter(vha);
875     + qla2x00_abort_isp_cleanup(vha);
876     vha->flags.online = 0;
877     clear_bit(ISP_ABORT_RETRY,
878     &vha->dpc_flags);
879     diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
880     index a1b01d66c9ab..bf29ad454118 100644
881     --- a/drivers/scsi/qla2xxx/qla_mbx.c
882     +++ b/drivers/scsi/qla2xxx/qla_mbx.c
883     @@ -3580,10 +3580,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
884     mcp->mb[0] = MBC_PORT_PARAMS;
885     mcp->mb[1] = loop_id;
886     mcp->mb[2] = BIT_0;
887     - if (IS_CNA_CAPABLE(vha->hw))
888     - mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
889     - else
890     - mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
891     + mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
892     mcp->mb[9] = vha->vp_idx;
893     mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
894     mcp->in_mb = MBX_3|MBX_1|MBX_0;
895     diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
896     index 793395451982..ea6b62cece88 100644
897     --- a/drivers/tty/serial/sc16is7xx.c
898     +++ b/drivers/tty/serial/sc16is7xx.c
899     @@ -661,7 +661,7 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
900     uart_write_wakeup(port);
901     }
902    
903     -static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
904     +static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
905     {
906     struct uart_port *port = &s->p[portno].port;
907    
908     @@ -670,7 +670,7 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
909    
910     iir = sc16is7xx_port_read(port, SC16IS7XX_IIR_REG);
911     if (iir & SC16IS7XX_IIR_NO_INT_BIT)
912     - break;
913     + return false;
914    
915     iir &= SC16IS7XX_IIR_ID_MASK;
916    
917     @@ -692,16 +692,23 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
918     port->line, iir);
919     break;
920     }
921     - } while (1);
922     + } while (0);
923     + return true;
924     }
925    
926     static void sc16is7xx_ist(struct kthread_work *ws)
927     {
928     struct sc16is7xx_port *s = to_sc16is7xx_port(ws, irq_work);
929     - int i;
930    
931     - for (i = 0; i < s->devtype->nr_uart; ++i)
932     - sc16is7xx_port_irq(s, i);
933     + while (1) {
934     + bool keep_polling = false;
935     + int i;
936     +
937     + for (i = 0; i < s->devtype->nr_uart; ++i)
938     + keep_polling |= sc16is7xx_port_irq(s, i);
939     + if (!keep_polling)
940     + break;
941     + }
942     }
943    
944     static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
945     diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
946     index e6429d419b80..e6c4321d695c 100644
947     --- a/drivers/tty/tty_io.c
948     +++ b/drivers/tty/tty_io.c
949     @@ -354,7 +354,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
950     mutex_lock(&tty_mutex);
951     /* Search through the tty devices to look for a match */
952     list_for_each_entry(p, &tty_drivers, tty_drivers) {
953     - if (strncmp(name, p->name, len) != 0)
954     + if (!len || strncmp(name, p->name, len) != 0)
955     continue;
956     stp = str;
957     if (*stp == ',')
958     diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
959     index bf36ac9aee41..11bb9a5c700d 100644
960     --- a/drivers/tty/tty_ioctl.c
961     +++ b/drivers/tty/tty_ioctl.c
962     @@ -325,7 +325,7 @@ speed_t tty_termios_baud_rate(struct ktermios *termios)
963     else
964     cbaud += 15;
965     }
966     - return baud_table[cbaud];
967     + return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
968     }
969     EXPORT_SYMBOL(tty_termios_baud_rate);
970    
971     @@ -361,7 +361,7 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios)
972     else
973     cbaud += 15;
974     }
975     - return baud_table[cbaud];
976     + return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
977     #else
978     return tty_termios_baud_rate(termios);
979     #endif
980     diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
981     index 9e36632b6f0e..17f9ad6fdfa5 100644
982     --- a/drivers/vhost/scsi.c
983     +++ b/drivers/vhost/scsi.c
984     @@ -999,7 +999,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
985     prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
986     }
987     /*
988     - * Set prot_iter to data_iter, and advance past any
989     + * Set prot_iter to data_iter and truncate it to
990     + * prot_bytes, and advance data_iter past any
991     * preceeding prot_bytes that may be present.
992     *
993     * Also fix up the exp_data_len to reflect only the
994     @@ -1008,6 +1009,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
995     if (prot_bytes) {
996     exp_data_len -= prot_bytes;
997     prot_iter = data_iter;
998     + iov_iter_truncate(&prot_iter, prot_bytes);
999     iov_iter_advance(&data_iter, prot_bytes);
1000     }
1001     tag = vhost64_to_cpu(vq, v_req_pi.tag);
1002     diff --git a/drivers/video/fbdev/aty/mach64_accel.c b/drivers/video/fbdev/aty/mach64_accel.c
1003     index 182bd680141f..e9dfe0e40b8b 100644
1004     --- a/drivers/video/fbdev/aty/mach64_accel.c
1005     +++ b/drivers/video/fbdev/aty/mach64_accel.c
1006     @@ -126,7 +126,7 @@ void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
1007    
1008     /* set host attributes */
1009     wait_for_fifo(13, par);
1010     - aty_st_le32(HOST_CNTL, 0, par);
1011     + aty_st_le32(HOST_CNTL, HOST_BYTE_ALIGN, par);
1012    
1013     /* set pattern attributes */
1014     aty_st_le32(PAT_REG0, 0, par);
1015     @@ -232,7 +232,8 @@ void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
1016     rotation = rotation24bpp(dx, direction);
1017     }
1018    
1019     - wait_for_fifo(4, par);
1020     + wait_for_fifo(5, par);
1021     + aty_st_le32(DP_PIX_WIDTH, par->crtc.dp_pix_width, par);
1022     aty_st_le32(DP_SRC, FRGD_SRC_BLIT, par);
1023     aty_st_le32(SRC_Y_X, (sx << 16) | sy, par);
1024     aty_st_le32(SRC_HEIGHT1_WIDTH1, (width << 16) | area->height, par);
1025     @@ -268,7 +269,8 @@ void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
1026     rotation = rotation24bpp(dx, DST_X_LEFT_TO_RIGHT);
1027     }
1028    
1029     - wait_for_fifo(3, par);
1030     + wait_for_fifo(4, par);
1031     + aty_st_le32(DP_PIX_WIDTH, par->crtc.dp_pix_width, par);
1032     aty_st_le32(DP_FRGD_CLR, color, par);
1033     aty_st_le32(DP_SRC,
1034     BKGD_SRC_BKGD_CLR | FRGD_SRC_FRGD_CLR | MONO_SRC_ONE,
1035     @@ -283,7 +285,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
1036     {
1037     struct atyfb_par *par = (struct atyfb_par *) info->par;
1038     u32 src_bytes, dx = image->dx, dy = image->dy, width = image->width;
1039     - u32 pix_width_save, pix_width, host_cntl, rotation = 0, src, mix;
1040     + u32 pix_width, rotation = 0, src, mix;
1041    
1042     if (par->asleep)
1043     return;
1044     @@ -295,8 +297,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
1045     return;
1046     }
1047    
1048     - pix_width = pix_width_save = aty_ld_le32(DP_PIX_WIDTH, par);
1049     - host_cntl = aty_ld_le32(HOST_CNTL, par) | HOST_BYTE_ALIGN;
1050     + pix_width = par->crtc.dp_pix_width;
1051    
1052     switch (image->depth) {
1053     case 1:
1054     @@ -344,7 +345,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
1055     * since Rage 3D IIc we have DP_HOST_TRIPLE_EN bit
1056     * this hwaccelerated triple has an issue with not aligned data
1057     */
1058     - if (M64_HAS(HW_TRIPLE) && image->width % 8 == 0)
1059     + if (image->depth == 1 && M64_HAS(HW_TRIPLE) && image->width % 8 == 0)
1060     pix_width |= DP_HOST_TRIPLE_EN;
1061     }
1062    
1063     @@ -369,19 +370,18 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
1064     mix = FRGD_MIX_D_XOR_S | BKGD_MIX_D;
1065     }
1066    
1067     - wait_for_fifo(6, par);
1068     - aty_st_le32(DP_WRITE_MASK, 0xFFFFFFFF, par);
1069     + wait_for_fifo(5, par);
1070     aty_st_le32(DP_PIX_WIDTH, pix_width, par);
1071     aty_st_le32(DP_MIX, mix, par);
1072     aty_st_le32(DP_SRC, src, par);
1073     - aty_st_le32(HOST_CNTL, host_cntl, par);
1074     + aty_st_le32(HOST_CNTL, HOST_BYTE_ALIGN, par);
1075     aty_st_le32(DST_CNTL, DST_Y_TOP_TO_BOTTOM | DST_X_LEFT_TO_RIGHT | rotation, par);
1076    
1077     draw_rect(dx, dy, width, image->height, par);
1078     src_bytes = (((image->width * image->depth) + 7) / 8) * image->height;
1079    
1080     /* manual triple each pixel */
1081     - if (info->var.bits_per_pixel == 24 && !(pix_width & DP_HOST_TRIPLE_EN)) {
1082     + if (image->depth == 1 && info->var.bits_per_pixel == 24 && !(pix_width & DP_HOST_TRIPLE_EN)) {
1083     int inbit, outbit, mult24, byte_id_in_dword, width;
1084     u8 *pbitmapin = (u8*)image->data, *pbitmapout;
1085     u32 hostdword;
1086     @@ -414,7 +414,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
1087     }
1088     }
1089     wait_for_fifo(1, par);
1090     - aty_st_le32(HOST_DATA0, hostdword, par);
1091     + aty_st_le32(HOST_DATA0, le32_to_cpu(hostdword), par);
1092     }
1093     } else {
1094     u32 *pbitmap, dwords = (src_bytes + 3) / 4;
1095     @@ -423,8 +423,4 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
1096     aty_st_le32(HOST_DATA0, get_unaligned_le32(pbitmap), par);
1097     }
1098     }
1099     -
1100     - /* restore pix_width */
1101     - wait_for_fifo(1, par);
1102     - aty_st_le32(DP_PIX_WIDTH, pix_width_save, par);
1103     }
1104     diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
1105     index d7b78d531e63..398a3eddb2df 100644
1106     --- a/fs/9p/vfs_file.c
1107     +++ b/fs/9p/vfs_file.c
1108     @@ -204,6 +204,14 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
1109     break;
1110     if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
1111     break;
1112     + /*
1113     + * p9_client_lock_dotl overwrites flock.client_id with the
1114     + * server message, free and reuse the client name
1115     + */
1116     + if (flock.client_id != fid->clnt->name) {
1117     + kfree(flock.client_id);
1118     + flock.client_id = fid->clnt->name;
1119     + }
1120     }
1121    
1122     /* map 9p status to VFS status */
1123     @@ -235,6 +243,8 @@ out_unlock:
1124     locks_lock_file_wait(filp, fl);
1125     fl->fl_type = fl_type;
1126     }
1127     + if (flock.client_id != fid->clnt->name)
1128     + kfree(flock.client_id);
1129     out:
1130     return res;
1131     }
1132     @@ -269,7 +279,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
1133    
1134     res = p9_client_getlock_dotl(fid, &glock);
1135     if (res < 0)
1136     - return res;
1137     + goto out;
1138     /* map 9p lock type to os lock type */
1139     switch (glock.type) {
1140     case P9_LOCK_TYPE_RDLCK:
1141     @@ -290,7 +300,9 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
1142     fl->fl_end = glock.start + glock.length - 1;
1143     fl->fl_pid = glock.proc_id;
1144     }
1145     - kfree(glock.client_id);
1146     +out:
1147     + if (glock.client_id != fid->clnt->name)
1148     + kfree(glock.client_id);
1149     return res;
1150     }
1151    
1152     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
1153     index 17e143d91fa9..1b1a9e35e082 100644
1154     --- a/fs/btrfs/inode.c
1155     +++ b/fs/btrfs/inode.c
1156     @@ -1548,12 +1548,11 @@ out_check:
1157     }
1158     btrfs_release_path(path);
1159    
1160     - if (cur_offset <= end && cow_start == (u64)-1) {
1161     + if (cur_offset <= end && cow_start == (u64)-1)
1162     cow_start = cur_offset;
1163     - cur_offset = end;
1164     - }
1165    
1166     if (cow_start != (u64)-1) {
1167     + cur_offset = end;
1168     ret = cow_file_range(inode, locked_page, cow_start, end, end,
1169     page_started, nr_written, 1, NULL);
1170     if (ret)
1171     diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
1172     index cbf512b64597..96ad2778405b 100644
1173     --- a/fs/btrfs/ioctl.c
1174     +++ b/fs/btrfs/ioctl.c
1175     @@ -3911,9 +3911,17 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
1176     goto out_unlock;
1177     if (len == 0)
1178     olen = len = src->i_size - off;
1179     - /* if we extend to eof, continue to block boundary */
1180     - if (off + len == src->i_size)
1181     + /*
1182     + * If we extend to eof, continue to block boundary if and only if the
1183     + * destination end offset matches the destination file's size, otherwise
1184     + * we would be corrupting data by placing the eof block into the middle
1185     + * of a file.
1186     + */
1187     + if (off + len == src->i_size) {
1188     + if (!IS_ALIGNED(len, bs) && destoff + len < inode->i_size)
1189     + goto out_unlock;
1190     len = ALIGN(src->i_size, bs) - off;
1191     + }
1192    
1193     if (len == 0) {
1194     ret = 0;
1195     diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
1196     index 1f754336f801..30d9d9e7057d 100644
1197     --- a/fs/ceph/inode.c
1198     +++ b/fs/ceph/inode.c
1199     @@ -1077,8 +1077,12 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
1200     if (IS_ERR(realdn)) {
1201     pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
1202     PTR_ERR(realdn), dn, in, ceph_vinop(in));
1203     - dput(dn);
1204     - dn = realdn; /* note realdn contains the error */
1205     + dn = realdn;
1206     + /*
1207     + * Caller should release 'dn' in the case of error.
1208     + * If 'req->r_dentry' is passed to this function,
1209     + * caller should leave 'req->r_dentry' untouched.
1210     + */
1211     goto out;
1212     } else if (realdn) {
1213     dout("dn %p (%d) spliced with %p (%d) "
1214     diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
1215     index 314b4edac72b..fea6db1ee065 100644
1216     --- a/fs/configfs/symlink.c
1217     +++ b/fs/configfs/symlink.c
1218     @@ -64,7 +64,7 @@ static void fill_item_path(struct config_item * item, char * buffer, int length)
1219    
1220     /* back up enough to print this bus id with '/' */
1221     length -= cur;
1222     - strncpy(buffer + length,config_item_name(p),cur);
1223     + memcpy(buffer + length, config_item_name(p), cur);
1224     *(buffer + --length) = '/';
1225     }
1226     }
1227     diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
1228     index bc727c393a89..3c3757ee11f0 100644
1229     --- a/fs/ext4/namei.c
1230     +++ b/fs/ext4/namei.c
1231     @@ -124,6 +124,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
1232     if (!is_dx_block && type == INDEX) {
1233     ext4_error_inode(inode, func, line, block,
1234     "directory leaf block found instead of index block");
1235     + brelse(bh);
1236     return ERR_PTR(-EFSCORRUPTED);
1237     }
1238     if (!ext4_has_metadata_csum(inode->i_sb) ||
1239     @@ -2842,7 +2843,9 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
1240     list_del_init(&EXT4_I(inode)->i_orphan);
1241     mutex_unlock(&sbi->s_orphan_lock);
1242     }
1243     - }
1244     + } else
1245     + brelse(iloc.bh);
1246     +
1247     jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
1248     jbd_debug(4, "orphan inode %lu will point to %d\n",
1249     inode->i_ino, NEXT_ORPHAN(inode));
1250     diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
1251     index 1da301ee78ce..9be605c63ae1 100644
1252     --- a/fs/ext4/resize.c
1253     +++ b/fs/ext4/resize.c
1254     @@ -442,16 +442,18 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
1255    
1256     BUFFER_TRACE(bh, "get_write_access");
1257     err = ext4_journal_get_write_access(handle, bh);
1258     - if (err)
1259     + if (err) {
1260     + brelse(bh);
1261     return err;
1262     + }
1263     ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
1264     block - start, count2);
1265     ext4_set_bits(bh->b_data, block - start, count2);
1266    
1267     err = ext4_handle_dirty_metadata(handle, NULL, bh);
1268     + brelse(bh);
1269     if (unlikely(err))
1270     return err;
1271     - brelse(bh);
1272     }
1273    
1274     return 0;
1275     @@ -588,7 +590,6 @@ handle_bb:
1276     bh = bclean(handle, sb, block);
1277     if (IS_ERR(bh)) {
1278     err = PTR_ERR(bh);
1279     - bh = NULL;
1280     goto out;
1281     }
1282     overhead = ext4_group_overhead_blocks(sb, group);
1283     @@ -600,9 +601,9 @@ handle_bb:
1284     ext4_mark_bitmap_end(group_data[i].blocks_count,
1285     sb->s_blocksize * 8, bh->b_data);
1286     err = ext4_handle_dirty_metadata(handle, NULL, bh);
1287     + brelse(bh);
1288     if (err)
1289     goto out;
1290     - brelse(bh);
1291    
1292     handle_ib:
1293     if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
1294     @@ -617,18 +618,16 @@ handle_ib:
1295     bh = bclean(handle, sb, block);
1296     if (IS_ERR(bh)) {
1297     err = PTR_ERR(bh);
1298     - bh = NULL;
1299     goto out;
1300     }
1301    
1302     ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
1303     sb->s_blocksize * 8, bh->b_data);
1304     err = ext4_handle_dirty_metadata(handle, NULL, bh);
1305     + brelse(bh);
1306     if (err)
1307     goto out;
1308     - brelse(bh);
1309     }
1310     - bh = NULL;
1311    
1312     /* Mark group tables in block bitmap */
1313     for (j = 0; j < GROUP_TABLE_COUNT; j++) {
1314     @@ -659,7 +658,6 @@ handle_ib:
1315     }
1316    
1317     out:
1318     - brelse(bh);
1319     err2 = ext4_journal_stop(handle);
1320     if (err2 && !err)
1321     err = err2;
1322     @@ -846,6 +844,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
1323     err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
1324     if (unlikely(err)) {
1325     ext4_std_error(sb, err);
1326     + iloc.bh = NULL;
1327     goto exit_inode;
1328     }
1329     brelse(dind);
1330     @@ -897,6 +896,7 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
1331     sizeof(struct buffer_head *),
1332     GFP_NOFS);
1333     if (!n_group_desc) {
1334     + brelse(gdb_bh);
1335     err = -ENOMEM;
1336     ext4_warning(sb, "not enough memory for %lu groups",
1337     gdb_num + 1);
1338     @@ -912,8 +912,6 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
1339     kvfree(o_group_desc);
1340     BUFFER_TRACE(gdb_bh, "get_write_access");
1341     err = ext4_journal_get_write_access(handle, gdb_bh);
1342     - if (unlikely(err))
1343     - brelse(gdb_bh);
1344     return err;
1345     }
1346    
1347     @@ -1095,8 +1093,10 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
1348     backup_block, backup_block -
1349     ext4_group_first_block_no(sb, group));
1350     BUFFER_TRACE(bh, "get_write_access");
1351     - if ((err = ext4_journal_get_write_access(handle, bh)))
1352     + if ((err = ext4_journal_get_write_access(handle, bh))) {
1353     + brelse(bh);
1354     break;
1355     + }
1356     lock_buffer(bh);
1357     memcpy(bh->b_data, data, size);
1358     if (rest)
1359     @@ -1991,7 +1991,7 @@ retry:
1360    
1361     err = ext4_alloc_flex_bg_array(sb, n_group + 1);
1362     if (err)
1363     - return err;
1364     + goto out;
1365    
1366     err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
1367     if (err)
1368     @@ -2027,6 +2027,10 @@ retry:
1369     n_blocks_count_retry = 0;
1370     free_flex_gd(flex_gd);
1371     flex_gd = NULL;
1372     + if (resize_inode) {
1373     + iput(resize_inode);
1374     + resize_inode = NULL;
1375     + }
1376     goto retry;
1377     }
1378    
1379     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1380     index f88d4804c3a8..75177eb498ed 100644
1381     --- a/fs/ext4/super.c
1382     +++ b/fs/ext4/super.c
1383     @@ -3897,6 +3897,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1384     sbi->s_groups_count = blocks_count;
1385     sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
1386     (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
1387     + if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
1388     + le32_to_cpu(es->s_inodes_count)) {
1389     + ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
1390     + le32_to_cpu(es->s_inodes_count),
1391     + ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
1392     + ret = -EINVAL;
1393     + goto failed_mount;
1394     + }
1395     db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
1396     EXT4_DESC_PER_BLOCK(sb);
1397     if (ext4_has_feature_meta_bg(sb)) {
1398     @@ -3916,14 +3924,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
1399     ret = -ENOMEM;
1400     goto failed_mount;
1401     }
1402     - if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
1403     - le32_to_cpu(es->s_inodes_count)) {
1404     - ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
1405     - le32_to_cpu(es->s_inodes_count),
1406     - ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
1407     - ret = -EINVAL;
1408     - goto failed_mount;
1409     - }
1410    
1411     bgl_lock_init(sbi->s_blockgroup_lock);
1412    
1413     @@ -4305,6 +4305,7 @@ failed_mount6:
1414     percpu_counter_destroy(&sbi->s_freeinodes_counter);
1415     percpu_counter_destroy(&sbi->s_dirs_counter);
1416     percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
1417     + percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
1418     failed_mount5:
1419     ext4_ext_release(sb);
1420     ext4_release_system_zone(sb);
1421     diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
1422     index 38385bcb9148..22f765069655 100644
1423     --- a/fs/ext4/xattr.c
1424     +++ b/fs/ext4/xattr.c
1425     @@ -1221,6 +1221,8 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
1426     error = ext4_xattr_block_set(handle, inode, &i, &bs);
1427     } else if (error == -ENOSPC) {
1428     if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
1429     + brelse(bs.bh);
1430     + bs.bh = NULL;
1431     error = ext4_xattr_block_find(inode, &i, &bs);
1432     if (error)
1433     goto cleanup;
1434     @@ -1391,6 +1393,8 @@ out:
1435     kfree(buffer);
1436     if (is)
1437     brelse(is->iloc.bh);
1438     + if (bs)
1439     + brelse(bs->bh);
1440     kfree(is);
1441     kfree(bs);
1442    
1443     diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
1444     index c94bab6103f5..b4253181b5d4 100644
1445     --- a/fs/fuse/dev.c
1446     +++ b/fs/fuse/dev.c
1447     @@ -383,12 +383,19 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
1448     if (test_bit(FR_BACKGROUND, &req->flags)) {
1449     spin_lock(&fc->lock);
1450     clear_bit(FR_BACKGROUND, &req->flags);
1451     - if (fc->num_background == fc->max_background)
1452     + if (fc->num_background == fc->max_background) {
1453     fc->blocked = 0;
1454     -
1455     - /* Wake up next waiter, if any */
1456     - if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
1457     wake_up(&fc->blocked_waitq);
1458     + } else if (!fc->blocked) {
1459     + /*
1460     + * Wake up next waiter, if any. It's okay to use
1461     + * waitqueue_active(), as we've already synced up
1462     + * fc->blocked with waiters with the wake_up() call
1463     + * above.
1464     + */
1465     + if (waitqueue_active(&fc->blocked_waitq))
1466     + wake_up(&fc->blocked_waitq);
1467     + }
1468    
1469     if (fc->num_background == fc->congestion_threshold &&
1470     fc->connected && fc->bdi_initialized) {
1471     @@ -1303,12 +1310,14 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
1472     goto out_end;
1473     }
1474     list_move_tail(&req->list, &fpq->processing);
1475     - spin_unlock(&fpq->lock);
1476     + __fuse_get_request(req);
1477     set_bit(FR_SENT, &req->flags);
1478     + spin_unlock(&fpq->lock);
1479     /* matches barrier in request_wait_answer() */
1480     smp_mb__after_atomic();
1481     if (test_bit(FR_INTERRUPTED, &req->flags))
1482     queue_interrupt(fiq, req);
1483     + fuse_put_request(fc, req);
1484    
1485     return reqsize;
1486    
1487     @@ -1706,8 +1715,10 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1488     req->in.args[1].size = total_len;
1489    
1490     err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
1491     - if (err)
1492     + if (err) {
1493     fuse_retrieve_end(fc, req);
1494     + fuse_put_request(fc, req);
1495     + }
1496    
1497     return err;
1498     }
1499     @@ -1866,16 +1877,20 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
1500    
1501     /* Is it an interrupt reply? */
1502     if (req->intr_unique == oh.unique) {
1503     + __fuse_get_request(req);
1504     spin_unlock(&fpq->lock);
1505    
1506     err = -EINVAL;
1507     - if (nbytes != sizeof(struct fuse_out_header))
1508     + if (nbytes != sizeof(struct fuse_out_header)) {
1509     + fuse_put_request(fc, req);
1510     goto err_finish;
1511     + }
1512    
1513     if (oh.error == -ENOSYS)
1514     fc->no_interrupt = 1;
1515     else if (oh.error == -EAGAIN)
1516     queue_interrupt(&fc->iq, req);
1517     + fuse_put_request(fc, req);
1518    
1519     fuse_copy_finish(cs);
1520     return nbytes;
1521     diff --git a/fs/fuse/file.c b/fs/fuse/file.c
1522     index 4408abf6675b..1cd46e667e3d 100644
1523     --- a/fs/fuse/file.c
1524     +++ b/fs/fuse/file.c
1525     @@ -2900,10 +2900,12 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
1526     }
1527    
1528     if (io->async) {
1529     + bool blocking = io->blocking;
1530     +
1531     fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
1532    
1533     /* we have a non-extending, async request, so return */
1534     - if (!io->blocking)
1535     + if (!blocking)
1536     return -EIOCBQUEUED;
1537    
1538     wait_for_completion(&wait);
1539     diff --git a/fs/namespace.c b/fs/namespace.c
1540     index 0a9e766b4087..41f906a6f5d9 100644
1541     --- a/fs/namespace.c
1542     +++ b/fs/namespace.c
1543     @@ -1599,8 +1599,13 @@ static int do_umount(struct mount *mnt, int flags)
1544    
1545     namespace_lock();
1546     lock_mount_hash();
1547     - event++;
1548    
1549     + /* Recheck MNT_LOCKED with the locks held */
1550     + retval = -EINVAL;
1551     + if (mnt->mnt.mnt_flags & MNT_LOCKED)
1552     + goto out;
1553     +
1554     + event++;
1555     if (flags & MNT_DETACH) {
1556     if (!list_empty(&mnt->mnt_list))
1557     umount_tree(mnt, UMOUNT_PROPAGATE);
1558     @@ -1614,6 +1619,7 @@ static int do_umount(struct mount *mnt, int flags)
1559     retval = 0;
1560     }
1561     }
1562     +out:
1563     unlock_mount_hash();
1564     namespace_unlock();
1565     return retval;
1566     @@ -1704,7 +1710,7 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
1567     goto dput_and_out;
1568     if (!check_mnt(mnt))
1569     goto dput_and_out;
1570     - if (mnt->mnt.mnt_flags & MNT_LOCKED)
1571     + if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
1572     goto dput_and_out;
1573     retval = -EPERM;
1574     if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
1575     @@ -1782,8 +1788,14 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
1576     for (s = r; s; s = next_mnt(s, r)) {
1577     if (!(flag & CL_COPY_UNBINDABLE) &&
1578     IS_MNT_UNBINDABLE(s)) {
1579     - s = skip_mnt_tree(s);
1580     - continue;
1581     + if (s->mnt.mnt_flags & MNT_LOCKED) {
1582     + /* Both unbindable and locked. */
1583     + q = ERR_PTR(-EPERM);
1584     + goto out;
1585     + } else {
1586     + s = skip_mnt_tree(s);
1587     + continue;
1588     + }
1589     }
1590     if (!(flag & CL_COPY_MNT_NS_FILE) &&
1591     is_mnt_ns_file(s->mnt.mnt_root)) {
1592     @@ -1836,7 +1848,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
1593     {
1594     namespace_lock();
1595     lock_mount_hash();
1596     - umount_tree(real_mount(mnt), UMOUNT_SYNC);
1597     + umount_tree(real_mount(mnt), 0);
1598     unlock_mount_hash();
1599     namespace_unlock();
1600     }
1601     diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
1602     index e9495516527d..66985a6a7047 100644
1603     --- a/fs/nfsd/nfs4proc.c
1604     +++ b/fs/nfsd/nfs4proc.c
1605     @@ -1016,6 +1016,9 @@ nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1606     {
1607     __be32 status;
1608    
1609     + if (!cstate->save_fh.fh_dentry)
1610     + return nfserr_nofilehandle;
1611     +
1612     status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh,
1613     src_stateid, RD_STATE, src, NULL);
1614     if (status) {
1615     diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
1616     index 3ecb9f337b7d..20e610419501 100644
1617     --- a/fs/ocfs2/dir.c
1618     +++ b/fs/ocfs2/dir.c
1619     @@ -1896,8 +1896,7 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode,
1620     /* On error, skip the f_pos to the
1621     next block. */
1622     ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
1623     - brelse(bh);
1624     - continue;
1625     + break;
1626     }
1627     if (le64_to_cpu(de->inode)) {
1628     unsigned char d_type = DT_UNKNOWN;
1629     diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
1630     index 1816c5e26581..a8a574897d3c 100644
1631     --- a/include/linux/ceph/libceph.h
1632     +++ b/include/linux/ceph/libceph.h
1633     @@ -77,7 +77,13 @@ struct ceph_options {
1634    
1635     #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
1636     #define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
1637     -#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024)
1638     +
1639     +/*
1640     + * Handle the largest possible rbd object in one message.
1641     + * There is no limit on the size of cephfs objects, but it has to obey
1642     + * rsize and wsize mount options anyway.
1643     + */
1644     +#define CEPH_MSG_MAX_DATA_LEN (32*1024*1024)
1645    
1646     #define CEPH_AUTH_NAME_DEFAULT "guest"
1647    
1648     diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
1649     index 48c76d612d40..b699d59d0f4f 100644
1650     --- a/include/linux/hugetlb.h
1651     +++ b/include/linux/hugetlb.h
1652     @@ -109,6 +109,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
1653     unsigned long addr, unsigned long sz);
1654     pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
1655     int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
1656     +void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
1657     + unsigned long *start, unsigned long *end);
1658     struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
1659     int write);
1660     struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
1661     @@ -131,6 +133,18 @@ static inline unsigned long hugetlb_total_pages(void)
1662     return 0;
1663     }
1664    
1665     +static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
1666     + pte_t *ptep)
1667     +{
1668     + return 0;
1669     +}
1670     +
1671     +static inline void adjust_range_if_pmd_sharing_possible(
1672     + struct vm_area_struct *vma,
1673     + unsigned long *start, unsigned long *end)
1674     +{
1675     +}
1676     +
1677     #define follow_hugetlb_page(m,v,p,vs,a,b,i,w) ({ BUG(); 0; })
1678     #define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
1679     #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
1680     diff --git a/include/linux/i8253.h b/include/linux/i8253.h
1681     index e6bb36a97519..8336b2f6f834 100644
1682     --- a/include/linux/i8253.h
1683     +++ b/include/linux/i8253.h
1684     @@ -21,6 +21,7 @@
1685     #define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
1686    
1687     extern raw_spinlock_t i8253_lock;
1688     +extern bool i8253_clear_counter_on_shutdown;
1689     extern struct clock_event_device i8253_clockevent;
1690     extern void clockevent_i8253_init(bool oneshot);
1691    
1692     diff --git a/include/linux/mm.h b/include/linux/mm.h
1693     index 493d07931ea5..11a5a46ce72b 100644
1694     --- a/include/linux/mm.h
1695     +++ b/include/linux/mm.h
1696     @@ -2187,6 +2187,12 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
1697     return vma;
1698     }
1699    
1700     +static inline bool range_in_vma(struct vm_area_struct *vma,
1701     + unsigned long start, unsigned long end)
1702     +{
1703     + return (vma && vma->vm_start <= start && end <= vma->vm_end);
1704     +}
1705     +
1706     #ifdef CONFIG_MMU
1707     pgprot_t vm_get_page_prot(unsigned long vm_flags);
1708     void vma_set_page_prot(struct vm_area_struct *vma);
1709     diff --git a/lib/ubsan.c b/lib/ubsan.c
1710     index 50d1d5c25deb..60e108c5c173 100644
1711     --- a/lib/ubsan.c
1712     +++ b/lib/ubsan.c
1713     @@ -451,8 +451,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
1714     EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
1715    
1716    
1717     -void __noreturn
1718     -__ubsan_handle_builtin_unreachable(struct unreachable_data *data)
1719     +void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
1720     {
1721     unsigned long flags;
1722    
1723     diff --git a/mm/gup.c b/mm/gup.c
1724     index be4ccddac26f..d71da7216c6e 100644
1725     --- a/mm/gup.c
1726     +++ b/mm/gup.c
1727     @@ -1122,8 +1122,6 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1728     int locked = 0;
1729     long ret = 0;
1730    
1731     - VM_BUG_ON(start & ~PAGE_MASK);
1732     - VM_BUG_ON(len != PAGE_ALIGN(len));
1733     end = start + len;
1734    
1735     for (nstart = start; nstart < end; nstart = nend) {
1736     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1737     index 9c566e4b06ce..5e3a4db36310 100644
1738     --- a/mm/hugetlb.c
1739     +++ b/mm/hugetlb.c
1740     @@ -3220,7 +3220,7 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
1741     int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
1742     struct vm_area_struct *vma)
1743     {
1744     - pte_t *src_pte, *dst_pte, entry;
1745     + pte_t *src_pte, *dst_pte, entry, dst_entry;
1746     struct page *ptepage;
1747     unsigned long addr;
1748     int cow;
1749     @@ -3248,15 +3248,30 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
1750     break;
1751     }
1752    
1753     - /* If the pagetables are shared don't copy or take references */
1754     - if (dst_pte == src_pte)
1755     + /*
1756     + * If the pagetables are shared don't copy or take references.
1757     + * dst_pte == src_pte is the common case of src/dest sharing.
1758     + *
1759     + * However, src could have 'unshared' and dst shares with
1760     + * another vma. If dst_pte !none, this implies sharing.
1761     + * Check here before taking page table lock, and once again
1762     + * after taking the lock below.
1763     + */
1764     + dst_entry = huge_ptep_get(dst_pte);
1765     + if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
1766     continue;
1767    
1768     dst_ptl = huge_pte_lock(h, dst, dst_pte);
1769     src_ptl = huge_pte_lockptr(h, src, src_pte);
1770     spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1771     entry = huge_ptep_get(src_pte);
1772     - if (huge_pte_none(entry)) { /* skip none entry */
1773     + dst_entry = huge_ptep_get(dst_pte);
1774     + if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
1775     + /*
1776     + * Skip if src entry none. Also, skip in the
1777     + * unlikely case dst entry !none as this implies
1778     + * sharing with another vma.
1779     + */
1780     ;
1781     } else if (unlikely(is_hugetlb_entry_migration(entry) ||
1782     is_hugetlb_entry_hwpoisoned(entry))) {
1783     @@ -4318,12 +4333,40 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
1784     /*
1785     * check on proper vm_flags and page table alignment
1786     */
1787     - if (vma->vm_flags & VM_MAYSHARE &&
1788     - vma->vm_start <= base && end <= vma->vm_end)
1789     + if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
1790     return true;
1791     return false;
1792     }
1793    
1794     +/*
1795     + * Determine if start,end range within vma could be mapped by shared pmd.
1796     + * If yes, adjust start and end to cover range associated with possible
1797     + * shared pmd mappings.
1798     + */
1799     +void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
1800     + unsigned long *start, unsigned long *end)
1801     +{
1802     + unsigned long check_addr = *start;
1803     +
1804     + if (!(vma->vm_flags & VM_MAYSHARE))
1805     + return;
1806     +
1807     + for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
1808     + unsigned long a_start = check_addr & PUD_MASK;
1809     + unsigned long a_end = a_start + PUD_SIZE;
1810     +
1811     + /*
1812     + * If sharing is possible, adjust start/end if necessary.
1813     + */
1814     + if (range_in_vma(vma, a_start, a_end)) {
1815     + if (a_start < *start)
1816     + *start = a_start;
1817     + if (a_end > *end)
1818     + *end = a_end;
1819     + }
1820     + }
1821     +}
1822     +
1823     /*
1824     * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
1825     * and returns the corresponding pte. While this is not necessary for the
1826     @@ -4420,6 +4463,11 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
1827     {
1828     return 0;
1829     }
1830     +
1831     +void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
1832     + unsigned long *start, unsigned long *end)
1833     +{
1834     +}
1835     #define want_pmd_share() (0)
1836     #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
1837    
1838     diff --git a/mm/mempolicy.c b/mm/mempolicy.c
1839     index 69c4a0c92ebb..e21d9b44247b 100644
1840     --- a/mm/mempolicy.c
1841     +++ b/mm/mempolicy.c
1842     @@ -2027,8 +2027,36 @@ retry_cpuset:
1843     nmask = policy_nodemask(gfp, pol);
1844     if (!nmask || node_isset(hpage_node, *nmask)) {
1845     mpol_cond_put(pol);
1846     - page = __alloc_pages_node(hpage_node,
1847     - gfp | __GFP_THISNODE, order);
1848     + /*
1849     + * We cannot invoke reclaim if __GFP_THISNODE
1850     + * is set. Invoking reclaim with
1851     + * __GFP_THISNODE set, would cause THP
1852     + * allocations to trigger heavy swapping
1853     + * despite there may be tons of free memory
1854     + * (including potentially plenty of THP
1855     + * already available in the buddy) on all the
1856     + * other NUMA nodes.
1857     + *
1858     + * At most we could invoke compaction when
1859     + * __GFP_THISNODE is set (but we would need to
1860     + * refrain from invoking reclaim even if
1861     + * compaction returned COMPACT_SKIPPED because
1862     + * there wasn't not enough memory to succeed
1863     + * compaction). For now just avoid
1864     + * __GFP_THISNODE instead of limiting the
1865     + * allocation path to a strict and single
1866     + * compaction invocation.
1867     + *
1868     + * Supposedly if direct reclaim was enabled by
1869     + * the caller, the app prefers THP regardless
1870     + * of the node it comes from so this would be
1871     + * more desiderable behavior than only
1872     + * providing THP originated from the local
1873     + * node in such case.
1874     + */
1875     + if (!(gfp & __GFP_DIRECT_RECLAIM))
1876     + gfp |= __GFP_THISNODE;
1877     + page = __alloc_pages_node(hpage_node, gfp, order);
1878     goto out;
1879     }
1880     }
1881     diff --git a/mm/mmap.c b/mm/mmap.c
1882     index aa97074a4a99..283755645d17 100644
1883     --- a/mm/mmap.c
1884     +++ b/mm/mmap.c
1885     @@ -2876,21 +2876,15 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
1886     * anonymous maps. eventually we may be able to do some
1887     * brk-specific accounting here.
1888     */
1889     -static int do_brk(unsigned long addr, unsigned long request)
1890     +static int do_brk(unsigned long addr, unsigned long len)
1891     {
1892     struct mm_struct *mm = current->mm;
1893     struct vm_area_struct *vma, *prev;
1894     - unsigned long flags, len;
1895     + unsigned long flags;
1896     struct rb_node **rb_link, *rb_parent;
1897     pgoff_t pgoff = addr >> PAGE_SHIFT;
1898     int error;
1899    
1900     - len = PAGE_ALIGN(request);
1901     - if (len < request)
1902     - return -ENOMEM;
1903     - if (!len)
1904     - return 0;
1905     -
1906     flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
1907    
1908     error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
1909     @@ -2959,12 +2953,19 @@ out:
1910     return 0;
1911     }
1912    
1913     -int vm_brk(unsigned long addr, unsigned long len)
1914     +int vm_brk(unsigned long addr, unsigned long request)
1915     {
1916     struct mm_struct *mm = current->mm;
1917     + unsigned long len;
1918     int ret;
1919     bool populate;
1920    
1921     + len = PAGE_ALIGN(request);
1922     + if (len < request)
1923     + return -ENOMEM;
1924     + if (!len)
1925     + return 0;
1926     +
1927     if (down_write_killable(&mm->mmap_sem))
1928     return -EINTR;
1929    
1930     diff --git a/mm/rmap.c b/mm/rmap.c
1931     index 94488b0362f8..a7276d8c96f3 100644
1932     --- a/mm/rmap.c
1933     +++ b/mm/rmap.c
1934     @@ -1476,6 +1476,9 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1935     pte_t pteval;
1936     spinlock_t *ptl;
1937     int ret = SWAP_AGAIN;
1938     + unsigned long sh_address;
1939     + bool pmd_sharing_possible = false;
1940     + unsigned long spmd_start, spmd_end;
1941     struct rmap_private *rp = arg;
1942     enum ttu_flags flags = rp->flags;
1943    
1944     @@ -1491,6 +1494,32 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1945     goto out;
1946     }
1947    
1948     + /*
1949     + * Only use the range_start/end mmu notifiers if huge pmd sharing
1950     + * is possible. In the normal case, mmu_notifier_invalidate_page
1951     + * is sufficient as we only unmap a page. However, if we unshare
1952     + * a pmd, we will unmap a PUD_SIZE range.
1953     + */
1954     + if (PageHuge(page)) {
1955     + spmd_start = address;
1956     + spmd_end = spmd_start + vma_mmu_pagesize(vma);
1957     +
1958     + /*
1959     + * Check if pmd sharing is possible. If possible, we could
1960     + * unmap a PUD_SIZE range. spmd_start/spmd_end will be
1961     + * modified if sharing is possible.
1962     + */
1963     + adjust_range_if_pmd_sharing_possible(vma, &spmd_start,
1964     + &spmd_end);
1965     + if (spmd_end - spmd_start != vma_mmu_pagesize(vma)) {
1966     + sh_address = address;
1967     +
1968     + pmd_sharing_possible = true;
1969     + mmu_notifier_invalidate_range_start(vma->vm_mm,
1970     + spmd_start, spmd_end);
1971     + }
1972     + }
1973     +
1974     pte = page_check_address(page, mm, address, &ptl,
1975     PageTransCompound(page));
1976     if (!pte)
1977     @@ -1524,6 +1553,30 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1978     }
1979     }
1980    
1981     + /*
1982     + * Call huge_pmd_unshare to potentially unshare a huge pmd. Pass
1983     + * sh_address as it will be modified if unsharing is successful.
1984     + */
1985     + if (PageHuge(page) && huge_pmd_unshare(mm, &sh_address, pte)) {
1986     + /*
1987     + * huge_pmd_unshare unmapped an entire PMD page. There is
1988     + * no way of knowing exactly which PMDs may be cached for
1989     + * this mm, so flush them all. spmd_start/spmd_end cover
1990     + * this PUD_SIZE range.
1991     + */
1992     + flush_cache_range(vma, spmd_start, spmd_end);
1993     + flush_tlb_range(vma, spmd_start, spmd_end);
1994     +
1995     + /*
1996     + * The ref count of the PMD page was dropped which is part
1997     + * of the way map counting is done for shared PMDs. When
1998     + * there is no other sharing, huge_pmd_unshare returns false
1999     + * and we will unmap the actual page and drop map count
2000     + * to zero.
2001     + */
2002     + goto out_unmap;
2003     + }
2004     +
2005     /* Nuke the page table entry. */
2006     flush_cache_page(vma, address, page_to_pfn(page));
2007     if (should_defer_flush(mm, flags)) {
2008     @@ -1621,6 +1674,9 @@ out_unmap:
2009     if (ret != SWAP_FAIL && ret != SWAP_MLOCK && !(flags & TTU_MUNLOCK))
2010     mmu_notifier_invalidate_page(mm, address);
2011     out:
2012     + if (pmd_sharing_possible)
2013     + mmu_notifier_invalidate_range_end(vma->vm_mm,
2014     + spmd_start, spmd_end);
2015     return ret;
2016     }
2017    
2018     diff --git a/net/9p/protocol.c b/net/9p/protocol.c
2019     index 16d287565987..145f80518064 100644
2020     --- a/net/9p/protocol.c
2021     +++ b/net/9p/protocol.c
2022     @@ -46,10 +46,15 @@ p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
2023     void p9stat_free(struct p9_wstat *stbuf)
2024     {
2025     kfree(stbuf->name);
2026     + stbuf->name = NULL;
2027     kfree(stbuf->uid);
2028     + stbuf->uid = NULL;
2029     kfree(stbuf->gid);
2030     + stbuf->gid = NULL;
2031     kfree(stbuf->muid);
2032     + stbuf->muid = NULL;
2033     kfree(stbuf->extension);
2034     + stbuf->extension = NULL;
2035     }
2036     EXPORT_SYMBOL(p9stat_free);
2037    
2038     diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
2039     index db3586ba1211..19b3f4fbea52 100644
2040     --- a/net/netfilter/nf_conntrack_core.c
2041     +++ b/net/netfilter/nf_conntrack_core.c
2042     @@ -918,19 +918,22 @@ static unsigned int early_drop_list(struct net *net,
2043     return drops;
2044     }
2045    
2046     -static noinline int early_drop(struct net *net, unsigned int _hash)
2047     +static noinline int early_drop(struct net *net, unsigned int hash)
2048     {
2049     - unsigned int i;
2050     + unsigned int i, bucket;
2051    
2052     for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
2053     struct hlist_nulls_head *ct_hash;
2054     - unsigned int hash, hsize, drops;
2055     + unsigned int hsize, drops;
2056    
2057     rcu_read_lock();
2058     nf_conntrack_get_ht(&ct_hash, &hsize);
2059     - hash = reciprocal_scale(_hash++, hsize);
2060     + if (!i)
2061     + bucket = reciprocal_scale(hash, hsize);
2062     + else
2063     + bucket = (bucket + 1) % hsize;
2064    
2065     - drops = early_drop_list(net, &ct_hash[hash]);
2066     + drops = early_drop_list(net, &ct_hash[bucket]);
2067     rcu_read_unlock();
2068    
2069     if (drops) {
2070     diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
2071     index 7f1071e103ca..1b38fc486351 100644
2072     --- a/net/sunrpc/xdr.c
2073     +++ b/net/sunrpc/xdr.c
2074     @@ -639,11 +639,10 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
2075     WARN_ON_ONCE(xdr->iov);
2076     return;
2077     }
2078     - if (fraglen) {
2079     + if (fraglen)
2080     xdr->end = head->iov_base + head->iov_len;
2081     - xdr->page_ptr--;
2082     - }
2083     /* (otherwise assume xdr->end is already set) */
2084     + xdr->page_ptr--;
2085     head->iov_len = len;
2086     buf->len = len;
2087     xdr->p = head->iov_base + head->iov_len;
2088     diff --git a/tools/testing/selftests/powerpc/tm/tm-tmspr.c b/tools/testing/selftests/powerpc/tm/tm-tmspr.c
2089     index 2bda81c7bf23..df1d7d4b1c89 100644
2090     --- a/tools/testing/selftests/powerpc/tm/tm-tmspr.c
2091     +++ b/tools/testing/selftests/powerpc/tm/tm-tmspr.c
2092     @@ -98,7 +98,7 @@ void texasr(void *in)
2093    
2094     int test_tmspr()
2095     {
2096     - pthread_t thread;
2097     + pthread_t *thread;
2098     int thread_num;
2099     unsigned long i;
2100    
2101     @@ -107,21 +107,28 @@ int test_tmspr()
2102     /* To cause some context switching */
2103     thread_num = 10 * sysconf(_SC_NPROCESSORS_ONLN);
2104    
2105     + thread = malloc(thread_num * sizeof(pthread_t));
2106     + if (thread == NULL)
2107     + return EXIT_FAILURE;
2108     +
2109     /* Test TFIAR and TFHAR */
2110     - for (i = 0 ; i < thread_num ; i += 2){
2111     - if (pthread_create(&thread, NULL, (void*)tfiar_tfhar, (void *)i))
2112     + for (i = 0; i < thread_num; i += 2) {
2113     + if (pthread_create(&thread[i], NULL, (void *)tfiar_tfhar,
2114     + (void *)i))
2115     return EXIT_FAILURE;
2116     }
2117     - if (pthread_join(thread, NULL) != 0)
2118     - return EXIT_FAILURE;
2119     -
2120     /* Test TEXASR */
2121     - for (i = 0 ; i < thread_num ; i++){
2122     - if (pthread_create(&thread, NULL, (void*)texasr, (void *)i))
2123     + for (i = 1; i < thread_num; i += 2) {
2124     + if (pthread_create(&thread[i], NULL, (void *)texasr, (void *)i))
2125     return EXIT_FAILURE;
2126     }
2127     - if (pthread_join(thread, NULL) != 0)
2128     - return EXIT_FAILURE;
2129     +
2130     + for (i = 0; i < thread_num; i++) {
2131     + if (pthread_join(thread[i], NULL) != 0)
2132     + return EXIT_FAILURE;
2133     + }
2134     +
2135     + free(thread);
2136    
2137     if (passed)
2138     return 0;