Magellan Linux

Annotation of /trunk/kernel-alx-legacy/patches-4.9/0113-4.9.14-all-fixes.patch

Parent Directory | Revision Log


Revision 3608 - (hide annotations) (download)
Fri Aug 14 07:34:29 2020 UTC (3 years, 9 months ago) by niro
File size: 220480 byte(s)
-added kernel-alx-legacy pkg
1 niro 3608 diff --git a/Makefile b/Makefile
2     index 14dc2758345b..5e7706e94622 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 13
9     +SUBLEVEL = 14
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
14     index 0b9a59d5fdac..30fac04289a5 100644
15     --- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
16     +++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
17     @@ -148,6 +148,8 @@
18     uart1: serial@f8020000 {
19     pinctrl-names = "default";
20     pinctrl-0 = <&pinctrl_uart1_default>;
21     + atmel,use-dma-rx;
22     + atmel,use-dma-tx;
23     status = "okay";
24     };
25    
26     diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
27     index ed7fce297738..44d1171c7fc0 100644
28     --- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
29     +++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
30     @@ -110,6 +110,8 @@
31     };
32    
33     usart3: serial@fc00c000 {
34     + atmel,use-dma-rx;
35     + atmel,use-dma-tx;
36     status = "okay";
37     };
38    
39     diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
40     index 74a44727f8e1..a58bbaa3ec60 100644
41     --- a/arch/arm/include/asm/kvm_mmu.h
42     +++ b/arch/arm/include/asm/kvm_mmu.h
43     @@ -150,18 +150,12 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
44     * and iterate over the range.
45     */
46    
47     - bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
48     -
49     VM_BUG_ON(size & ~PAGE_MASK);
50    
51     - if (!need_flush && !icache_is_pipt())
52     - goto vipt_cache;
53     -
54     while (size) {
55     void *va = kmap_atomic_pfn(pfn);
56    
57     - if (need_flush)
58     - kvm_flush_dcache_to_poc(va, PAGE_SIZE);
59     + kvm_flush_dcache_to_poc(va, PAGE_SIZE);
60    
61     if (icache_is_pipt())
62     __cpuc_coherent_user_range((unsigned long)va,
63     @@ -173,7 +167,6 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
64     kunmap_atomic(va);
65     }
66    
67     -vipt_cache:
68     if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
69     /* any kind of VIPT cache */
70     __flush_icache_all();
71     diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
72     index 6f72fe8b0e3e..6d22017ebbad 100644
73     --- a/arch/arm64/include/asm/kvm_mmu.h
74     +++ b/arch/arm64/include/asm/kvm_mmu.h
75     @@ -241,8 +241,7 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
76     {
77     void *va = page_address(pfn_to_page(pfn));
78    
79     - if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
80     - kvm_flush_dcache_to_poc(va, size);
81     + kvm_flush_dcache_to_poc(va, size);
82    
83     if (!icache_is_aliasing()) { /* PIPT */
84     flush_icache_range((unsigned long)va,
85     diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
86     index c02504ea304b..3a129d48674e 100644
87     --- a/arch/arm64/kernel/cpufeature.c
88     +++ b/arch/arm64/kernel/cpufeature.c
89     @@ -653,15 +653,15 @@ static u64 __raw_read_system_reg(u32 sys_id)
90     case SYS_ID_ISAR2_EL1: return read_cpuid(ID_ISAR2_EL1);
91     case SYS_ID_ISAR3_EL1: return read_cpuid(ID_ISAR3_EL1);
92     case SYS_ID_ISAR4_EL1: return read_cpuid(ID_ISAR4_EL1);
93     - case SYS_ID_ISAR5_EL1: return read_cpuid(ID_ISAR4_EL1);
94     + case SYS_ID_ISAR5_EL1: return read_cpuid(ID_ISAR5_EL1);
95     case SYS_MVFR0_EL1: return read_cpuid(MVFR0_EL1);
96     case SYS_MVFR1_EL1: return read_cpuid(MVFR1_EL1);
97     case SYS_MVFR2_EL1: return read_cpuid(MVFR2_EL1);
98    
99     case SYS_ID_AA64PFR0_EL1: return read_cpuid(ID_AA64PFR0_EL1);
100     - case SYS_ID_AA64PFR1_EL1: return read_cpuid(ID_AA64PFR0_EL1);
101     + case SYS_ID_AA64PFR1_EL1: return read_cpuid(ID_AA64PFR1_EL1);
102     case SYS_ID_AA64DFR0_EL1: return read_cpuid(ID_AA64DFR0_EL1);
103     - case SYS_ID_AA64DFR1_EL1: return read_cpuid(ID_AA64DFR0_EL1);
104     + case SYS_ID_AA64DFR1_EL1: return read_cpuid(ID_AA64DFR1_EL1);
105     case SYS_ID_AA64MMFR0_EL1: return read_cpuid(ID_AA64MMFR0_EL1);
106     case SYS_ID_AA64MMFR1_EL1: return read_cpuid(ID_AA64MMFR1_EL1);
107     case SYS_ID_AA64MMFR2_EL1: return read_cpuid(ID_AA64MMFR2_EL1);
108     diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
109     index 02265a589ef5..b5bf46ce873b 100644
110     --- a/arch/arm64/mm/dma-mapping.c
111     +++ b/arch/arm64/mm/dma-mapping.c
112     @@ -352,6 +352,13 @@ static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
113     return 1;
114     }
115    
116     +static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
117     +{
118     + if (swiotlb)
119     + return swiotlb_dma_mapping_error(hwdev, addr);
120     + return 0;
121     +}
122     +
123     static struct dma_map_ops swiotlb_dma_ops = {
124     .alloc = __dma_alloc,
125     .free = __dma_free,
126     @@ -366,7 +373,7 @@ static struct dma_map_ops swiotlb_dma_ops = {
127     .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
128     .sync_sg_for_device = __swiotlb_sync_sg_for_device,
129     .dma_supported = __swiotlb_dma_supported,
130     - .mapping_error = swiotlb_dma_mapping_error,
131     + .mapping_error = __swiotlb_dma_mapping_error,
132     };
133    
134     static int __init atomic_pool_init(void)
135     diff --git a/arch/mips/bcm47xx/buttons.c b/arch/mips/bcm47xx/buttons.c
136     index 52caa75bfe4e..e2f50d690624 100644
137     --- a/arch/mips/bcm47xx/buttons.c
138     +++ b/arch/mips/bcm47xx/buttons.c
139     @@ -17,6 +17,12 @@
140     .active_low = 1, \
141     }
142    
143     +#define BCM47XX_GPIO_KEY_H(_gpio, _code) \
144     + { \
145     + .code = _code, \
146     + .gpio = _gpio, \
147     + }
148     +
149     /* Asus */
150    
151     static const struct gpio_keys_button
152     @@ -79,8 +85,8 @@ bcm47xx_buttons_asus_wl500gpv2[] __initconst = {
153    
154     static const struct gpio_keys_button
155     bcm47xx_buttons_asus_wl500w[] __initconst = {
156     - BCM47XX_GPIO_KEY(6, KEY_RESTART),
157     - BCM47XX_GPIO_KEY(7, KEY_WPS_BUTTON),
158     + BCM47XX_GPIO_KEY_H(6, KEY_RESTART),
159     + BCM47XX_GPIO_KEY_H(7, KEY_WPS_BUTTON),
160     };
161    
162     static const struct gpio_keys_button
163     diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
164     index 64e08df51d65..8b7004132491 100644
165     --- a/arch/mips/cavium-octeon/octeon-memcpy.S
166     +++ b/arch/mips/cavium-octeon/octeon-memcpy.S
167     @@ -208,18 +208,18 @@ EXC( STORE t2, UNIT(6)(dst), s_exc_p10u)
168     ADD src, src, 16*NBYTES
169     EXC( STORE t3, UNIT(7)(dst), s_exc_p9u)
170     ADD dst, dst, 16*NBYTES
171     -EXC( LOAD t0, UNIT(-8)(src), l_exc_copy)
172     -EXC( LOAD t1, UNIT(-7)(src), l_exc_copy)
173     -EXC( LOAD t2, UNIT(-6)(src), l_exc_copy)
174     -EXC( LOAD t3, UNIT(-5)(src), l_exc_copy)
175     +EXC( LOAD t0, UNIT(-8)(src), l_exc_copy_rewind16)
176     +EXC( LOAD t1, UNIT(-7)(src), l_exc_copy_rewind16)
177     +EXC( LOAD t2, UNIT(-6)(src), l_exc_copy_rewind16)
178     +EXC( LOAD t3, UNIT(-5)(src), l_exc_copy_rewind16)
179     EXC( STORE t0, UNIT(-8)(dst), s_exc_p8u)
180     EXC( STORE t1, UNIT(-7)(dst), s_exc_p7u)
181     EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u)
182     EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u)
183     -EXC( LOAD t0, UNIT(-4)(src), l_exc_copy)
184     -EXC( LOAD t1, UNIT(-3)(src), l_exc_copy)
185     -EXC( LOAD t2, UNIT(-2)(src), l_exc_copy)
186     -EXC( LOAD t3, UNIT(-1)(src), l_exc_copy)
187     +EXC( LOAD t0, UNIT(-4)(src), l_exc_copy_rewind16)
188     +EXC( LOAD t1, UNIT(-3)(src), l_exc_copy_rewind16)
189     +EXC( LOAD t2, UNIT(-2)(src), l_exc_copy_rewind16)
190     +EXC( LOAD t3, UNIT(-1)(src), l_exc_copy_rewind16)
191     EXC( STORE t0, UNIT(-4)(dst), s_exc_p4u)
192     EXC( STORE t1, UNIT(-3)(dst), s_exc_p3u)
193     EXC( STORE t2, UNIT(-2)(dst), s_exc_p2u)
194     @@ -383,6 +383,10 @@ done:
195     nop
196     END(memcpy)
197    
198     +l_exc_copy_rewind16:
199     + /* Rewind src and dst by 16*NBYTES for l_exc_copy */
200     + SUB src, src, 16*NBYTES
201     + SUB dst, dst, 16*NBYTES
202     l_exc_copy:
203     /*
204     * Copy bytes from src until faulting load address (or until a
205     diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
206     index bce1ce53149a..0e231970653a 100644
207     --- a/arch/mips/include/asm/checksum.h
208     +++ b/arch/mips/include/asm/checksum.h
209     @@ -186,7 +186,9 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
210     " daddu %0, %4 \n"
211     " dsll32 $1, %0, 0 \n"
212     " daddu %0, $1 \n"
213     + " sltu $1, %0, $1 \n"
214     " dsra32 %0, %0, 0 \n"
215     + " addu %0, $1 \n"
216     #endif
217     " .set pop"
218     : "=r" (sum)
219     diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
220     index 9514e5f2209f..1652f36acad1 100644
221     --- a/arch/mips/kernel/process.c
222     +++ b/arch/mips/kernel/process.c
223     @@ -195,11 +195,9 @@ struct mips_frame_info {
224     #define J_TARGET(pc,target) \
225     (((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
226    
227     -static inline int is_ra_save_ins(union mips_instruction *ip)
228     +static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
229     {
230     #ifdef CONFIG_CPU_MICROMIPS
231     - union mips_instruction mmi;
232     -
233     /*
234     * swsp ra,offset
235     * swm16 reglist,offset(sp)
236     @@ -209,29 +207,71 @@ static inline int is_ra_save_ins(union mips_instruction *ip)
237     *
238     * microMIPS is way more fun...
239     */
240     - if (mm_insn_16bit(ip->halfword[0])) {
241     - mmi.word = (ip->halfword[0] << 16);
242     - return (mmi.mm16_r5_format.opcode == mm_swsp16_op &&
243     - mmi.mm16_r5_format.rt == 31) ||
244     - (mmi.mm16_m_format.opcode == mm_pool16c_op &&
245     - mmi.mm16_m_format.func == mm_swm16_op);
246     + if (mm_insn_16bit(ip->halfword[1])) {
247     + switch (ip->mm16_r5_format.opcode) {
248     + case mm_swsp16_op:
249     + if (ip->mm16_r5_format.rt != 31)
250     + return 0;
251     +
252     + *poff = ip->mm16_r5_format.simmediate;
253     + *poff = (*poff << 2) / sizeof(ulong);
254     + return 1;
255     +
256     + case mm_pool16c_op:
257     + switch (ip->mm16_m_format.func) {
258     + case mm_swm16_op:
259     + *poff = ip->mm16_m_format.imm;
260     + *poff += 1 + ip->mm16_m_format.rlist;
261     + *poff = (*poff << 2) / sizeof(ulong);
262     + return 1;
263     +
264     + default:
265     + return 0;
266     + }
267     +
268     + default:
269     + return 0;
270     + }
271     }
272     - else {
273     - mmi.halfword[0] = ip->halfword[1];
274     - mmi.halfword[1] = ip->halfword[0];
275     - return (mmi.mm_m_format.opcode == mm_pool32b_op &&
276     - mmi.mm_m_format.rd > 9 &&
277     - mmi.mm_m_format.base == 29 &&
278     - mmi.mm_m_format.func == mm_swm32_func) ||
279     - (mmi.i_format.opcode == mm_sw32_op &&
280     - mmi.i_format.rs == 29 &&
281     - mmi.i_format.rt == 31);
282     +
283     + switch (ip->i_format.opcode) {
284     + case mm_sw32_op:
285     + if (ip->i_format.rs != 29)
286     + return 0;
287     + if (ip->i_format.rt != 31)
288     + return 0;
289     +
290     + *poff = ip->i_format.simmediate / sizeof(ulong);
291     + return 1;
292     +
293     + case mm_pool32b_op:
294     + switch (ip->mm_m_format.func) {
295     + case mm_swm32_func:
296     + if (ip->mm_m_format.rd < 0x10)
297     + return 0;
298     + if (ip->mm_m_format.base != 29)
299     + return 0;
300     +
301     + *poff = ip->mm_m_format.simmediate;
302     + *poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
303     + *poff /= sizeof(ulong);
304     + return 1;
305     + default:
306     + return 0;
307     + }
308     +
309     + default:
310     + return 0;
311     }
312     #else
313     /* sw / sd $ra, offset($sp) */
314     - return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
315     - ip->i_format.rs == 29 &&
316     - ip->i_format.rt == 31;
317     + if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
318     + ip->i_format.rs == 29 && ip->i_format.rt == 31) {
319     + *poff = ip->i_format.simmediate / sizeof(ulong);
320     + return 1;
321     + }
322     +
323     + return 0;
324     #endif
325     }
326    
327     @@ -246,13 +286,16 @@ static inline int is_jump_ins(union mips_instruction *ip)
328     *
329     * microMIPS is kind of more fun...
330     */
331     - union mips_instruction mmi;
332     -
333     - mmi.word = (ip->halfword[0] << 16);
334     + if (mm_insn_16bit(ip->halfword[1])) {
335     + if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
336     + (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
337     + return 1;
338     + return 0;
339     + }
340    
341     - if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
342     - (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
343     - ip->j_format.opcode == mm_jal32_op)
344     + if (ip->j_format.opcode == mm_j32_op)
345     + return 1;
346     + if (ip->j_format.opcode == mm_jal32_op)
347     return 1;
348     if (ip->r_format.opcode != mm_pool32a_op ||
349     ip->r_format.func != mm_pool32axf_op)
350     @@ -280,15 +323,13 @@ static inline int is_sp_move_ins(union mips_instruction *ip)
351     *
352     * microMIPS is not more fun...
353     */
354     - if (mm_insn_16bit(ip->halfword[0])) {
355     - union mips_instruction mmi;
356     -
357     - mmi.word = (ip->halfword[0] << 16);
358     - return (mmi.mm16_r3_format.opcode == mm_pool16d_op &&
359     - mmi.mm16_r3_format.simmediate && mm_addiusp_func) ||
360     - (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
361     - mmi.mm16_r5_format.rt == 29);
362     + if (mm_insn_16bit(ip->halfword[1])) {
363     + return (ip->mm16_r3_format.opcode == mm_pool16d_op &&
364     + ip->mm16_r3_format.simmediate && mm_addiusp_func) ||
365     + (ip->mm16_r5_format.opcode == mm_pool16d_op &&
366     + ip->mm16_r5_format.rt == 29);
367     }
368     +
369     return ip->mm_i_format.opcode == mm_addiu32_op &&
370     ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29;
371     #else
372     @@ -303,30 +344,36 @@ static inline int is_sp_move_ins(union mips_instruction *ip)
373    
374     static int get_frame_info(struct mips_frame_info *info)
375     {
376     -#ifdef CONFIG_CPU_MICROMIPS
377     - union mips_instruction *ip = (void *) (((char *) info->func) - 1);
378     -#else
379     - union mips_instruction *ip = info->func;
380     -#endif
381     - unsigned max_insns = info->func_size / sizeof(union mips_instruction);
382     - unsigned i;
383     + bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
384     + union mips_instruction insn, *ip, *ip_end;
385     + const unsigned int max_insns = 128;
386     + unsigned int i;
387    
388     info->pc_offset = -1;
389     info->frame_size = 0;
390    
391     + ip = (void *)msk_isa16_mode((ulong)info->func);
392     if (!ip)
393     goto err;
394    
395     - if (max_insns == 0)
396     - max_insns = 128U; /* unknown function size */
397     - max_insns = min(128U, max_insns);
398     + ip_end = (void *)ip + info->func_size;
399    
400     - for (i = 0; i < max_insns; i++, ip++) {
401     + for (i = 0; i < max_insns && ip < ip_end; i++, ip++) {
402     + if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
403     + insn.halfword[0] = 0;
404     + insn.halfword[1] = ip->halfword[0];
405     + } else if (is_mmips) {
406     + insn.halfword[0] = ip->halfword[1];
407     + insn.halfword[1] = ip->halfword[0];
408     + } else {
409     + insn.word = ip->word;
410     + }
411    
412     - if (is_jump_ins(ip))
413     + if (is_jump_ins(&insn))
414     break;
415     +
416     if (!info->frame_size) {
417     - if (is_sp_move_ins(ip))
418     + if (is_sp_move_ins(&insn))
419     {
420     #ifdef CONFIG_CPU_MICROMIPS
421     if (mm_insn_16bit(ip->halfword[0]))
422     @@ -349,11 +396,9 @@ static int get_frame_info(struct mips_frame_info *info)
423     }
424     continue;
425     }
426     - if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
427     - info->pc_offset =
428     - ip->i_format.simmediate / sizeof(long);
429     + if (info->pc_offset == -1 &&
430     + is_ra_save_ins(&insn, &info->pc_offset))
431     break;
432     - }
433     }
434     if (info->frame_size && info->pc_offset >= 0) /* nested */
435     return 0;
436     diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
437     index 236193b5210b..9a61671c00a7 100644
438     --- a/arch/mips/lantiq/xway/sysctrl.c
439     +++ b/arch/mips/lantiq/xway/sysctrl.c
440     @@ -545,7 +545,7 @@ void __init ltq_soc_init(void)
441     clkdev_add_pmu("1a800000.pcie", "msi", 1, 1, PMU1_PCIE2_MSI);
442     clkdev_add_pmu("1a800000.pcie", "pdi", 1, 1, PMU1_PCIE2_PDI);
443     clkdev_add_pmu("1a800000.pcie", "ctl", 1, 1, PMU1_PCIE2_CTL);
444     - clkdev_add_pmu("1e108000.eth", NULL, 1, 0, PMU_SWITCH | PMU_PPE_DP);
445     + clkdev_add_pmu("1e108000.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP);
446     clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
447     clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
448     } else if (of_machine_is_compatible("lantiq,ar10")) {
449     @@ -553,7 +553,7 @@ void __init ltq_soc_init(void)
450     ltq_ar10_fpi_hz(), ltq_ar10_pp32_hz());
451     clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0);
452     clkdev_add_pmu("1e106000.usb", "ctl", 1, 0, PMU_USB1);
453     - clkdev_add_pmu("1e108000.eth", NULL, 1, 0, PMU_SWITCH |
454     + clkdev_add_pmu("1e108000.eth", NULL, 0, 0, PMU_SWITCH |
455     PMU_PPE_DP | PMU_PPE_TC);
456     clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
457     clkdev_add_pmu("1f203000.rcu", "gphy", 1, 0, PMU_GPHY);
458     @@ -575,11 +575,11 @@ void __init ltq_soc_init(void)
459     clkdev_add_pmu(NULL, "ahb", 1, 0, PMU_AHBM | PMU_AHBS);
460    
461     clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
462     - clkdev_add_pmu("1e108000.eth", NULL, 1, 0,
463     + clkdev_add_pmu("1e108000.eth", NULL, 0, 0,
464     PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
465     PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
466     PMU_PPE_QSB | PMU_PPE_TOP);
467     - clkdev_add_pmu("1f203000.rcu", "gphy", 1, 0, PMU_GPHY);
468     + clkdev_add_pmu("1f203000.rcu", "gphy", 0, 0, PMU_GPHY);
469     clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
470     clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
471     clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
472     diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
473     index 026cb59a914d..f293a97cb885 100644
474     --- a/arch/mips/mm/sc-ip22.c
475     +++ b/arch/mips/mm/sc-ip22.c
476     @@ -31,26 +31,40 @@ static inline void indy_sc_wipe(unsigned long first, unsigned long last)
477     unsigned long tmp;
478    
479     __asm__ __volatile__(
480     - ".set\tpush\t\t\t# indy_sc_wipe\n\t"
481     - ".set\tnoreorder\n\t"
482     - ".set\tmips3\n\t"
483     - ".set\tnoat\n\t"
484     - "mfc0\t%2, $12\n\t"
485     - "li\t$1, 0x80\t\t\t# Go 64 bit\n\t"
486     - "mtc0\t$1, $12\n\t"
487     -
488     - "dli\t$1, 0x9000000080000000\n\t"
489     - "or\t%0, $1\t\t\t# first line to flush\n\t"
490     - "or\t%1, $1\t\t\t# last line to flush\n\t"
491     - ".set\tat\n\t"
492     -
493     - "1:\tsw\t$0, 0(%0)\n\t"
494     - "bne\t%0, %1, 1b\n\t"
495     - " daddu\t%0, 32\n\t"
496     -
497     - "mtc0\t%2, $12\t\t\t# Back to 32 bit\n\t"
498     - "nop; nop; nop; nop;\n\t"
499     - ".set\tpop"
500     + " .set push # indy_sc_wipe \n"
501     + " .set noreorder \n"
502     + " .set mips3 \n"
503     + " .set noat \n"
504     + " mfc0 %2, $12 \n"
505     + " li $1, 0x80 # Go 64 bit \n"
506     + " mtc0 $1, $12 \n"
507     + " \n"
508     + " # \n"
509     + " # Open code a dli $1, 0x9000000080000000 \n"
510     + " # \n"
511     + " # Required because binutils 2.25 will happily accept \n"
512     + " # 64 bit instructions in .set mips3 mode but puke on \n"
513     + " # 64 bit constants when generating 32 bit ELF \n"
514     + " # \n"
515     + " lui $1,0x9000 \n"
516     + " dsll $1,$1,0x10 \n"
517     + " ori $1,$1,0x8000 \n"
518     + " dsll $1,$1,0x10 \n"
519     + " \n"
520     + " or %0, $1 # first line to flush \n"
521     + " or %1, $1 # last line to flush \n"
522     + " .set at \n"
523     + " \n"
524     + "1: sw $0, 0(%0) \n"
525     + " bne %0, %1, 1b \n"
526     + " daddu %0, 32 \n"
527     + " \n"
528     + " mtc0 %2, $12 # Back to 32 bit \n"
529     + " nop # pipeline hazard \n"
530     + " nop \n"
531     + " nop \n"
532     + " nop \n"
533     + " .set pop \n"
534     : "=r" (first), "=r" (last), "=&r" (tmp)
535     : "0" (first), "1" (last));
536     }
537     diff --git a/arch/mips/pic32/pic32mzda/Makefile b/arch/mips/pic32/pic32mzda/Makefile
538     index 4a4c2728c027..c28649615c6c 100644
539     --- a/arch/mips/pic32/pic32mzda/Makefile
540     +++ b/arch/mips/pic32/pic32mzda/Makefile
541     @@ -2,8 +2,7 @@
542     # Joshua Henderson, <joshua.henderson@microchip.com>
543     # Copyright (C) 2015 Microchip Technology, Inc. All rights reserved.
544     #
545     -obj-y := init.o time.o config.o
546     +obj-y := config.o early_clk.o init.o time.o
547    
548     obj-$(CONFIG_EARLY_PRINTK) += early_console.o \
549     - early_pin.o \
550     - early_clk.o
551     + early_pin.o
552     diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
553     index a244e09d2d88..5d22b0bef3d8 100644
554     --- a/arch/powerpc/include/asm/mmu.h
555     +++ b/arch/powerpc/include/asm/mmu.h
556     @@ -136,6 +136,7 @@ enum {
557     MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL |
558     MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
559     MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
560     + MMU_FTR_KERNEL_RO |
561     #ifdef CONFIG_PPC_RADIX_MMU
562     MMU_FTR_TYPE_RADIX |
563     #endif
564     diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
565     index 37c027ca83b2..7803756998e2 100644
566     --- a/arch/powerpc/kernel/cpu_setup_power.S
567     +++ b/arch/powerpc/kernel/cpu_setup_power.S
568     @@ -100,6 +100,8 @@ _GLOBAL(__setup_cpu_power9)
569     mfspr r3,SPRN_LPCR
570     LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
571     or r3, r3, r4
572     + LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR)
573     + andc r3, r3, r4
574     bl __init_LPCR
575     bl __init_HFSCR
576     bl __init_tlb_power9
577     @@ -120,6 +122,8 @@ _GLOBAL(__restore_cpu_power9)
578     mfspr r3,SPRN_LPCR
579     LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
580     or r3, r3, r4
581     + LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR)
582     + andc r3, r3, r4
583     bl __init_LPCR
584     bl __init_HFSCR
585     bl __init_tlb_power9
586     diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
587     index 03d089b3ed72..469d86d1c2a5 100644
588     --- a/arch/powerpc/kernel/hw_breakpoint.c
589     +++ b/arch/powerpc/kernel/hw_breakpoint.c
590     @@ -228,8 +228,10 @@ int hw_breakpoint_handler(struct die_args *args)
591     rcu_read_lock();
592    
593     bp = __this_cpu_read(bp_per_reg);
594     - if (!bp)
595     + if (!bp) {
596     + rc = NOTIFY_DONE;
597     goto out;
598     + }
599     info = counter_arch_bp(bp);
600    
601     /*
602     diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
603     index 34684adb6899..b3b09b98896d 100644
604     --- a/arch/x86/include/asm/pkeys.h
605     +++ b/arch/x86/include/asm/pkeys.h
606     @@ -46,6 +46,15 @@ extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
607     static inline
608     bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
609     {
610     + /*
611     + * "Allocated" pkeys are those that have been returned
612     + * from pkey_alloc(). pkey 0 is special, and never
613     + * returned from pkey_alloc().
614     + */
615     + if (pkey <= 0)
616     + return false;
617     + if (pkey >= arch_max_pkey())
618     + return false;
619     return mm_pkey_allocation_map(mm) & (1U << pkey);
620     }
621    
622     @@ -82,12 +91,6 @@ int mm_pkey_alloc(struct mm_struct *mm)
623     static inline
624     int mm_pkey_free(struct mm_struct *mm, int pkey)
625     {
626     - /*
627     - * pkey 0 is special, always allocated and can never
628     - * be freed.
629     - */
630     - if (!pkey)
631     - return -EINVAL;
632     if (!mm_pkey_is_allocated(mm, pkey))
633     return -EINVAL;
634    
635     diff --git a/crypto/testmgr.h b/crypto/testmgr.h
636     index e64a4ef9d8ca..9033088ca231 100644
637     --- a/crypto/testmgr.h
638     +++ b/crypto/testmgr.h
639     @@ -22813,7 +22813,7 @@ static struct aead_testvec aes_ccm_enc_tv_template[] = {
640     "\x09\x75\x9a\x9b\x3c\x9b\x27\x39",
641     .klen = 32,
642     .iv = "\x03\xf9\xd9\x4e\x63\xb5\x3d\x9d"
643     - "\x43\xf6\x1e\x50",
644     + "\x43\xf6\x1e\x50\0\0\0\0",
645     .assoc = "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b"
646     "\x13\x02\x01\x0c\x83\x4c\x96\x35"
647     "\x8e\xd6\x39\xcf\x7d\x14\x9b\x94"
648     diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
649     index 2c1798e38abd..38688236b3cd 100644
650     --- a/drivers/bcma/main.c
651     +++ b/drivers/bcma/main.c
652     @@ -633,8 +633,11 @@ static int bcma_device_probe(struct device *dev)
653     drv);
654     int err = 0;
655    
656     + get_device(dev);
657     if (adrv->probe)
658     err = adrv->probe(core);
659     + if (err)
660     + put_device(dev);
661    
662     return err;
663     }
664     @@ -647,6 +650,7 @@ static int bcma_device_remove(struct device *dev)
665    
666     if (adrv->remove)
667     adrv->remove(core);
668     + put_device(dev);
669    
670     return 0;
671     }
672     diff --git a/drivers/block/loop.c b/drivers/block/loop.c
673     index 4af818766797..24d6cefceb32 100644
674     --- a/drivers/block/loop.c
675     +++ b/drivers/block/loop.c
676     @@ -1097,9 +1097,12 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
677     if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
678     return -EINVAL;
679    
680     + /* I/O need to be drained during transfer transition */
681     + blk_mq_freeze_queue(lo->lo_queue);
682     +
683     err = loop_release_xfer(lo);
684     if (err)
685     - return err;
686     + goto exit;
687    
688     if (info->lo_encrypt_type) {
689     unsigned int type = info->lo_encrypt_type;
690     @@ -1114,12 +1117,14 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
691    
692     err = loop_init_xfer(lo, xfer, info);
693     if (err)
694     - return err;
695     + goto exit;
696    
697     if (lo->lo_offset != info->lo_offset ||
698     lo->lo_sizelimit != info->lo_sizelimit)
699     - if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit))
700     - return -EFBIG;
701     + if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
702     + err = -EFBIG;
703     + goto exit;
704     + }
705    
706     loop_config_discard(lo);
707    
708     @@ -1137,13 +1142,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
709     (info->lo_flags & LO_FLAGS_AUTOCLEAR))
710     lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;
711    
712     - if ((info->lo_flags & LO_FLAGS_PARTSCAN) &&
713     - !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
714     - lo->lo_flags |= LO_FLAGS_PARTSCAN;
715     - lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
716     - loop_reread_partitions(lo, lo->lo_device);
717     - }
718     -
719     lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
720     lo->lo_init[0] = info->lo_init[0];
721     lo->lo_init[1] = info->lo_init[1];
722     @@ -1156,7 +1154,17 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
723     /* update dio if lo_offset or transfer is changed */
724     __loop_update_dio(lo, lo->use_dio);
725    
726     - return 0;
727     + exit:
728     + blk_mq_unfreeze_queue(lo->lo_queue);
729     +
730     + if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) &&
731     + !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
732     + lo->lo_flags |= LO_FLAGS_PARTSCAN;
733     + lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
734     + loop_reread_partitions(lo, lo->lo_device);
735     + }
736     +
737     + return err;
738     }
739    
740     static int
741     diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
742     index eaf5730d79eb..8022bea27fed 100644
743     --- a/drivers/char/tpm/tpm_tis.c
744     +++ b/drivers/char/tpm/tpm_tis.c
745     @@ -421,7 +421,7 @@ static int __init init_tis(void)
746     acpi_bus_unregister_driver(&tis_acpi_driver);
747     err_acpi:
748     #endif
749     - platform_device_unregister(force_pdev);
750     + platform_driver_unregister(&tis_drv);
751     err_platform:
752     if (force_pdev)
753     platform_device_unregister(force_pdev);
754     diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
755     index 712592cef1a2..7309c0824887 100644
756     --- a/drivers/devfreq/devfreq.c
757     +++ b/drivers/devfreq/devfreq.c
758     @@ -130,7 +130,7 @@ static void devfreq_set_freq_table(struct devfreq *devfreq)
759     * @devfreq: the devfreq instance
760     * @freq: the update target frequency
761     */
762     -static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
763     +int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
764     {
765     int lev, prev_lev, ret = 0;
766     unsigned long cur_time;
767     @@ -166,6 +166,7 @@ static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
768     devfreq->last_stat_updated = cur_time;
769     return ret;
770     }
771     +EXPORT_SYMBOL(devfreq_update_status);
772    
773     /**
774     * find_devfreq_governor() - find devfreq governor from name
775     @@ -939,6 +940,9 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
776     if (df->governor == governor) {
777     ret = 0;
778     goto out;
779     + } else if (df->governor->immutable || governor->immutable) {
780     + ret = -EINVAL;
781     + goto out;
782     }
783    
784     if (df->governor) {
785     @@ -968,13 +972,33 @@ static ssize_t available_governors_show(struct device *d,
786     struct device_attribute *attr,
787     char *buf)
788     {
789     - struct devfreq_governor *tmp_governor;
790     + struct devfreq *df = to_devfreq(d);
791     ssize_t count = 0;
792    
793     mutex_lock(&devfreq_list_lock);
794     - list_for_each_entry(tmp_governor, &devfreq_governor_list, node)
795     - count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
796     - "%s ", tmp_governor->name);
797     +
798     + /*
799     + * The devfreq with immutable governor (e.g., passive) shows
800     + * only own governor.
801     + */
802     + if (df->governor->immutable) {
803     + count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
804     + "%s ", df->governor_name);
805     + /*
806     + * The devfreq device shows the registered governor except for
807     + * immutable governors such as passive governor .
808     + */
809     + } else {
810     + struct devfreq_governor *governor;
811     +
812     + list_for_each_entry(governor, &devfreq_governor_list, node) {
813     + if (governor->immutable)
814     + continue;
815     + count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
816     + "%s ", governor->name);
817     + }
818     + }
819     +
820     mutex_unlock(&devfreq_list_lock);
821    
822     /* Truncate the trailing space */
823     diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
824     index fad7d6321978..71576b8bdfef 100644
825     --- a/drivers/devfreq/governor.h
826     +++ b/drivers/devfreq/governor.h
827     @@ -38,4 +38,6 @@ extern void devfreq_interval_update(struct devfreq *devfreq,
828     extern int devfreq_add_governor(struct devfreq_governor *governor);
829     extern int devfreq_remove_governor(struct devfreq_governor *governor);
830    
831     +extern int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
832     +
833     #endif /* _GOVERNOR_H */
834     diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
835     index 9ef46e2592c4..5be96b2249e7 100644
836     --- a/drivers/devfreq/governor_passive.c
837     +++ b/drivers/devfreq/governor_passive.c
838     @@ -112,6 +112,11 @@ static int update_devfreq_passive(struct devfreq *devfreq, unsigned long freq)
839     if (ret < 0)
840     goto out;
841    
842     + if (devfreq->profile->freq_table
843     + && (devfreq_update_status(devfreq, freq)))
844     + dev_err(&devfreq->dev,
845     + "Couldn't update frequency transition information.\n");
846     +
847     devfreq->previous_freq = freq;
848    
849     out:
850     @@ -179,6 +184,7 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq,
851    
852     static struct devfreq_governor devfreq_passive = {
853     .name = "passive",
854     + .immutable = 1,
855     .get_target_freq = devfreq_passive_get_target_freq,
856     .event_handler = devfreq_passive_event_handler,
857     };
858     diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
859     index dd184b50e5b4..284627806b88 100644
860     --- a/drivers/dma/ipu/ipu_irq.c
861     +++ b/drivers/dma/ipu/ipu_irq.c
862     @@ -272,7 +272,7 @@ static void ipu_irq_handler(struct irq_desc *desc)
863     u32 status;
864     int i, line;
865    
866     - for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) {
867     + for (i = 0; i < IPU_IRQ_NR_BANKS; i++) {
868     struct ipu_irq_bank *bank = irq_bank + i;
869    
870     raw_spin_lock(&bank_lock);
871     diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
872     index 5fb4c6d9209b..be34547cdb68 100644
873     --- a/drivers/hv/channel.c
874     +++ b/drivers/hv/channel.c
875     @@ -157,6 +157,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
876     }
877    
878     init_completion(&open_info->waitevent);
879     + open_info->waiting_channel = newchannel;
880    
881     open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
882     open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
883     @@ -181,7 +182,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
884     spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
885    
886     ret = vmbus_post_msg(open_msg,
887     - sizeof(struct vmbus_channel_open_channel));
888     + sizeof(struct vmbus_channel_open_channel), true);
889    
890     if (ret != 0) {
891     err = ret;
892     @@ -194,6 +195,11 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
893     list_del(&open_info->msglistentry);
894     spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
895    
896     + if (newchannel->rescind) {
897     + err = -ENODEV;
898     + goto error_free_gpadl;
899     + }
900     +
901     if (open_info->response.open_result.status) {
902     err = -EAGAIN;
903     goto error_free_gpadl;
904     @@ -233,7 +239,7 @@ int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
905     conn_msg.guest_endpoint_id = *shv_guest_servie_id;
906     conn_msg.host_service_id = *shv_host_servie_id;
907    
908     - return vmbus_post_msg(&conn_msg, sizeof(conn_msg));
909     + return vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
910     }
911     EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
912    
913     @@ -405,6 +411,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
914     return ret;
915    
916     init_completion(&msginfo->waitevent);
917     + msginfo->waiting_channel = channel;
918    
919     gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
920     gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
921     @@ -419,7 +426,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
922     spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
923    
924     ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
925     - sizeof(*msginfo));
926     + sizeof(*msginfo), true);
927     if (ret != 0)
928     goto cleanup;
929    
930     @@ -433,14 +440,19 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
931     gpadl_body->gpadl = next_gpadl_handle;
932    
933     ret = vmbus_post_msg(gpadl_body,
934     - submsginfo->msgsize -
935     - sizeof(*submsginfo));
936     + submsginfo->msgsize - sizeof(*submsginfo),
937     + true);
938     if (ret != 0)
939     goto cleanup;
940    
941     }
942     wait_for_completion(&msginfo->waitevent);
943    
944     + if (channel->rescind) {
945     + ret = -ENODEV;
946     + goto cleanup;
947     + }
948     +
949     /* At this point, we received the gpadl created msg */
950     *gpadl_handle = gpadlmsg->gpadl;
951    
952     @@ -474,6 +486,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
953     return -ENOMEM;
954    
955     init_completion(&info->waitevent);
956     + info->waiting_channel = channel;
957    
958     msg = (struct vmbus_channel_gpadl_teardown *)info->msg;
959    
960     @@ -485,14 +498,19 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
961     list_add_tail(&info->msglistentry,
962     &vmbus_connection.chn_msg_list);
963     spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
964     - ret = vmbus_post_msg(msg,
965     - sizeof(struct vmbus_channel_gpadl_teardown));
966     + ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
967     + true);
968    
969     if (ret)
970     goto post_msg_err;
971    
972     wait_for_completion(&info->waitevent);
973    
974     + if (channel->rescind) {
975     + ret = -ENODEV;
976     + goto post_msg_err;
977     + }
978     +
979     post_msg_err:
980     spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
981     list_del(&info->msglistentry);
982     @@ -557,7 +575,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
983     msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
984     msg->child_relid = channel->offermsg.child_relid;
985    
986     - ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));
987     + ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
988     + true);
989    
990     if (ret) {
991     pr_err("Close failed: close post msg return is %d\n", ret);
992     diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
993     index caf341842464..cb9531541a12 100644
994     --- a/drivers/hv/channel_mgmt.c
995     +++ b/drivers/hv/channel_mgmt.c
996     @@ -147,6 +147,29 @@ static const struct {
997     { HV_RDV_GUID },
998     };
999    
1000     +/*
1001     + * The rescinded channel may be blocked waiting for a response from the host;
1002     + * take care of that.
1003     + */
1004     +static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
1005     +{
1006     + struct vmbus_channel_msginfo *msginfo;
1007     + unsigned long flags;
1008     +
1009     +
1010     + spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
1011     +
1012     + list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
1013     + msglistentry) {
1014     +
1015     + if (msginfo->waiting_channel == channel) {
1016     + complete(&msginfo->waitevent);
1017     + break;
1018     + }
1019     + }
1020     + spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
1021     +}
1022     +
1023     static bool is_unsupported_vmbus_devs(const uuid_le *guid)
1024     {
1025     int i;
1026     @@ -321,7 +344,8 @@ static void vmbus_release_relid(u32 relid)
1027     memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
1028     msg.child_relid = relid;
1029     msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
1030     - vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));
1031     + vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
1032     + true);
1033     }
1034    
1035     void hv_event_tasklet_disable(struct vmbus_channel *channel)
1036     @@ -728,7 +752,8 @@ void vmbus_initiate_unload(bool crash)
1037     init_completion(&vmbus_connection.unload_event);
1038     memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
1039     hdr.msgtype = CHANNELMSG_UNLOAD;
1040     - vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));
1041     + vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
1042     + !crash);
1043    
1044     /*
1045     * vmbus_initiate_unload() is also called on crash and the crash can be
1046     @@ -823,6 +848,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
1047     channel->rescind = true;
1048     spin_unlock_irqrestore(&channel->lock, flags);
1049    
1050     + vmbus_rescind_cleanup(channel);
1051     +
1052     if (channel->device_obj) {
1053     if (channel->chn_rescind_callback) {
1054     channel->chn_rescind_callback(channel);
1055     @@ -1116,8 +1143,8 @@ int vmbus_request_offers(void)
1056     msg->msgtype = CHANNELMSG_REQUESTOFFERS;
1057    
1058    
1059     - ret = vmbus_post_msg(msg,
1060     - sizeof(struct vmbus_channel_message_header));
1061     + ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
1062     + true);
1063     if (ret != 0) {
1064     pr_err("Unable to request offers - %d\n", ret);
1065    
1066     diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
1067     index 78e6368a4423..840b6db0ea4b 100644
1068     --- a/drivers/hv/connection.c
1069     +++ b/drivers/hv/connection.c
1070     @@ -110,7 +110,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
1071     spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
1072    
1073     ret = vmbus_post_msg(msg,
1074     - sizeof(struct vmbus_channel_initiate_contact));
1075     + sizeof(struct vmbus_channel_initiate_contact),
1076     + true);
1077     if (ret != 0) {
1078     spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
1079     list_del(&msginfo->msglistentry);
1080     @@ -434,7 +435,7 @@ void vmbus_on_event(unsigned long data)
1081     /*
1082     * vmbus_post_msg - Send a msg on the vmbus's message connection
1083     */
1084     -int vmbus_post_msg(void *buffer, size_t buflen)
1085     +int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep)
1086     {
1087     union hv_connection_id conn_id;
1088     int ret = 0;
1089     @@ -449,7 +450,7 @@ int vmbus_post_msg(void *buffer, size_t buflen)
1090     * insufficient resources. Retry the operation a couple of
1091     * times before giving up.
1092     */
1093     - while (retries < 20) {
1094     + while (retries < 100) {
1095     ret = hv_post_message(conn_id, 1, buffer, buflen);
1096    
1097     switch (ret) {
1098     @@ -472,8 +473,14 @@ int vmbus_post_msg(void *buffer, size_t buflen)
1099     }
1100    
1101     retries++;
1102     - udelay(usec);
1103     - if (usec < 2048)
1104     + if (can_sleep && usec > 1000)
1105     + msleep(usec / 1000);
1106     + else if (usec < MAX_UDELAY_MS * 1000)
1107     + udelay(usec);
1108     + else
1109     + mdelay(usec / 1000);
1110     +
1111     + if (usec < 256000)
1112     usec *= 2;
1113     }
1114     return ret;
1115     diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
1116     index 60dbd6cb4640..6e49a4dd99c0 100644
1117     --- a/drivers/hv/hv.c
1118     +++ b/drivers/hv/hv.c
1119     @@ -309,9 +309,10 @@ void hv_cleanup(bool crash)
1120    
1121     hypercall_msr.as_uint64 = 0;
1122     wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
1123     - if (!crash)
1124     + if (!crash) {
1125     vfree(hv_context.tsc_page);
1126     - hv_context.tsc_page = NULL;
1127     + hv_context.tsc_page = NULL;
1128     + }
1129     }
1130     #endif
1131     }
1132     @@ -411,7 +412,7 @@ int hv_synic_alloc(void)
1133     goto err;
1134     }
1135    
1136     - for_each_online_cpu(cpu) {
1137     + for_each_present_cpu(cpu) {
1138     hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
1139     if (hv_context.event_dpc[cpu] == NULL) {
1140     pr_err("Unable to allocate event dpc\n");
1141     @@ -457,6 +458,8 @@ int hv_synic_alloc(void)
1142     pr_err("Unable to allocate post msg page\n");
1143     goto err;
1144     }
1145     +
1146     + INIT_LIST_HEAD(&hv_context.percpu_list[cpu]);
1147     }
1148    
1149     return 0;
1150     @@ -482,7 +485,7 @@ void hv_synic_free(void)
1151     int cpu;
1152    
1153     kfree(hv_context.hv_numa_map);
1154     - for_each_online_cpu(cpu)
1155     + for_each_present_cpu(cpu)
1156     hv_synic_free_cpu(cpu);
1157     }
1158    
1159     @@ -552,8 +555,6 @@ void hv_synic_init(void *arg)
1160     rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
1161     hv_context.vp_index[cpu] = (u32)vp_index;
1162    
1163     - INIT_LIST_HEAD(&hv_context.percpu_list[cpu]);
1164     -
1165     /*
1166     * Register the per-cpu clockevent source.
1167     */
1168     diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
1169     index 8b2ba98831ec..e47d8c9db03a 100644
1170     --- a/drivers/hv/hv_fcopy.c
1171     +++ b/drivers/hv/hv_fcopy.c
1172     @@ -61,6 +61,7 @@ static DECLARE_WORK(fcopy_send_work, fcopy_send_data);
1173     static const char fcopy_devname[] = "vmbus/hv_fcopy";
1174     static u8 *recv_buffer;
1175     static struct hvutil_transport *hvt;
1176     +static struct completion release_event;
1177     /*
1178     * This state maintains the version number registered by the daemon.
1179     */
1180     @@ -317,6 +318,7 @@ static void fcopy_on_reset(void)
1181    
1182     if (cancel_delayed_work_sync(&fcopy_timeout_work))
1183     fcopy_respond_to_host(HV_E_FAIL);
1184     + complete(&release_event);
1185     }
1186    
1187     int hv_fcopy_init(struct hv_util_service *srv)
1188     @@ -324,6 +326,7 @@ int hv_fcopy_init(struct hv_util_service *srv)
1189     recv_buffer = srv->recv_buffer;
1190     fcopy_transaction.recv_channel = srv->channel;
1191    
1192     + init_completion(&release_event);
1193     /*
1194     * When this driver loads, the user level daemon that
1195     * processes the host requests may not yet be running.
1196     @@ -345,4 +348,5 @@ void hv_fcopy_deinit(void)
1197     fcopy_transaction.state = HVUTIL_DEVICE_DYING;
1198     cancel_delayed_work_sync(&fcopy_timeout_work);
1199     hvutil_transport_destroy(hvt);
1200     + wait_for_completion(&release_event);
1201     }
1202     diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
1203     index 5e1fdc8d32ab..3abfc5983c97 100644
1204     --- a/drivers/hv/hv_kvp.c
1205     +++ b/drivers/hv/hv_kvp.c
1206     @@ -88,6 +88,7 @@ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
1207     static const char kvp_devname[] = "vmbus/hv_kvp";
1208     static u8 *recv_buffer;
1209     static struct hvutil_transport *hvt;
1210     +static struct completion release_event;
1211     /*
1212     * Register the kernel component with the user-level daemon.
1213     * As part of this registration, pass the LIC version number.
1214     @@ -716,6 +717,7 @@ static void kvp_on_reset(void)
1215     if (cancel_delayed_work_sync(&kvp_timeout_work))
1216     kvp_respond_to_host(NULL, HV_E_FAIL);
1217     kvp_transaction.state = HVUTIL_DEVICE_INIT;
1218     + complete(&release_event);
1219     }
1220    
1221     int
1222     @@ -724,6 +726,7 @@ hv_kvp_init(struct hv_util_service *srv)
1223     recv_buffer = srv->recv_buffer;
1224     kvp_transaction.recv_channel = srv->channel;
1225    
1226     + init_completion(&release_event);
1227     /*
1228     * When this driver loads, the user level daemon that
1229     * processes the host requests may not yet be running.
1230     @@ -747,4 +750,5 @@ void hv_kvp_deinit(void)
1231     cancel_delayed_work_sync(&kvp_timeout_work);
1232     cancel_work_sync(&kvp_sendkey_work);
1233     hvutil_transport_destroy(hvt);
1234     + wait_for_completion(&release_event);
1235     }
1236     diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
1237     index a6707133c297..a76e3db0d01f 100644
1238     --- a/drivers/hv/hv_snapshot.c
1239     +++ b/drivers/hv/hv_snapshot.c
1240     @@ -66,6 +66,7 @@ static int dm_reg_value;
1241     static const char vss_devname[] = "vmbus/hv_vss";
1242     static __u8 *recv_buffer;
1243     static struct hvutil_transport *hvt;
1244     +static struct completion release_event;
1245    
1246     static void vss_timeout_func(struct work_struct *dummy);
1247     static void vss_handle_request(struct work_struct *dummy);
1248     @@ -330,11 +331,13 @@ static void vss_on_reset(void)
1249     if (cancel_delayed_work_sync(&vss_timeout_work))
1250     vss_respond_to_host(HV_E_FAIL);
1251     vss_transaction.state = HVUTIL_DEVICE_INIT;
1252     + complete(&release_event);
1253     }
1254    
1255     int
1256     hv_vss_init(struct hv_util_service *srv)
1257     {
1258     + init_completion(&release_event);
1259     if (vmbus_proto_version < VERSION_WIN8_1) {
1260     pr_warn("Integration service 'Backup (volume snapshot)'"
1261     " not supported on this host version.\n");
1262     @@ -365,4 +368,5 @@ void hv_vss_deinit(void)
1263     cancel_delayed_work_sync(&vss_timeout_work);
1264     cancel_work_sync(&vss_handle_request_work);
1265     hvutil_transport_destroy(hvt);
1266     + wait_for_completion(&release_event);
1267     }
1268     diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
1269     index 2b13f2a0a71e..8d7f865c1133 100644
1270     --- a/drivers/hv/hyperv_vmbus.h
1271     +++ b/drivers/hv/hyperv_vmbus.h
1272     @@ -683,7 +683,7 @@ void vmbus_free_channels(void);
1273     int vmbus_connect(void);
1274     void vmbus_disconnect(void);
1275    
1276     -int vmbus_post_msg(void *buffer, size_t buflen);
1277     +int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep);
1278    
1279     void vmbus_on_event(unsigned long data);
1280     void vmbus_on_msg_dpc(unsigned long data);
1281     diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
1282     index 308dbda700eb..e94ed1c22c8b 100644
1283     --- a/drivers/hv/ring_buffer.c
1284     +++ b/drivers/hv/ring_buffer.c
1285     @@ -298,6 +298,9 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
1286     unsigned long flags = 0;
1287     struct hv_ring_buffer_info *outring_info = &channel->outbound;
1288    
1289     + if (channel->rescind)
1290     + return -ENODEV;
1291     +
1292     for (i = 0; i < kv_count; i++)
1293     totalbytes_towrite += kv_list[i].iov_len;
1294    
1295     @@ -350,6 +353,10 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
1296     spin_unlock_irqrestore(&outring_info->ring_lock, flags);
1297    
1298     hv_signal_on_write(old_write, channel, kick_q);
1299     +
1300     + if (channel->rescind)
1301     + return -ENODEV;
1302     +
1303     return 0;
1304     }
1305    
1306     diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
1307     index ad82cb28d87a..43146162c122 100644
1308     --- a/drivers/hwmon/it87.c
1309     +++ b/drivers/hwmon/it87.c
1310     @@ -1300,25 +1300,35 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
1311     it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
1312     data->fan_main_ctrl);
1313     } else {
1314     + u8 ctrl;
1315     +
1316     /* No on/off mode, set maximum pwm value */
1317     data->pwm_duty[nr] = pwm_to_reg(data, 0xff);
1318     it87_write_value(data, IT87_REG_PWM_DUTY[nr],
1319     data->pwm_duty[nr]);
1320     /* and set manual mode */
1321     - data->pwm_ctrl[nr] = has_newer_autopwm(data) ?
1322     - data->pwm_temp_map[nr] :
1323     - data->pwm_duty[nr];
1324     - it87_write_value(data, IT87_REG_PWM[nr],
1325     - data->pwm_ctrl[nr]);
1326     + if (has_newer_autopwm(data)) {
1327     + ctrl = (data->pwm_ctrl[nr] & 0x7c) |
1328     + data->pwm_temp_map[nr];
1329     + } else {
1330     + ctrl = data->pwm_duty[nr];
1331     + }
1332     + data->pwm_ctrl[nr] = ctrl;
1333     + it87_write_value(data, IT87_REG_PWM[nr], ctrl);
1334     }
1335     } else {
1336     - if (val == 1) /* Manual mode */
1337     - data->pwm_ctrl[nr] = has_newer_autopwm(data) ?
1338     - data->pwm_temp_map[nr] :
1339     - data->pwm_duty[nr];
1340     - else /* Automatic mode */
1341     - data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr];
1342     - it87_write_value(data, IT87_REG_PWM[nr], data->pwm_ctrl[nr]);
1343     + u8 ctrl;
1344     +
1345     + if (has_newer_autopwm(data)) {
1346     + ctrl = (data->pwm_ctrl[nr] & 0x7c) |
1347     + data->pwm_temp_map[nr];
1348     + if (val != 1)
1349     + ctrl |= 0x80;
1350     + } else {
1351     + ctrl = (val == 1 ? data->pwm_duty[nr] : 0x80);
1352     + }
1353     + data->pwm_ctrl[nr] = ctrl;
1354     + it87_write_value(data, IT87_REG_PWM[nr], ctrl);
1355    
1356     if (data->type != it8603 && nr < 3) {
1357     /* set SmartGuardian mode */
1358     @@ -1344,6 +1354,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
1359     return -EINVAL;
1360    
1361     mutex_lock(&data->update_lock);
1362     + it87_update_pwm_ctrl(data, nr);
1363     if (has_newer_autopwm(data)) {
1364     /*
1365     * If we are in automatic mode, the PWM duty cycle register
1366     @@ -1456,13 +1467,15 @@ static ssize_t set_pwm_temp_map(struct device *dev,
1367     }
1368    
1369     mutex_lock(&data->update_lock);
1370     + it87_update_pwm_ctrl(data, nr);
1371     data->pwm_temp_map[nr] = reg;
1372     /*
1373     * If we are in automatic mode, write the temp mapping immediately;
1374     * otherwise, just store it for later use.
1375     */
1376     if (data->pwm_ctrl[nr] & 0x80) {
1377     - data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr];
1378     + data->pwm_ctrl[nr] = (data->pwm_ctrl[nr] & 0xfc) |
1379     + data->pwm_temp_map[nr];
1380     it87_write_value(data, IT87_REG_PWM[nr], data->pwm_ctrl[nr]);
1381     }
1382     mutex_unlock(&data->update_lock);
1383     diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
1384     index 49e0f1b925a5..8e7905632d25 100644
1385     --- a/drivers/hwtracing/coresight/coresight-stm.c
1386     +++ b/drivers/hwtracing/coresight/coresight-stm.c
1387     @@ -356,7 +356,7 @@ static void stm_generic_unlink(struct stm_data *stm_data,
1388     if (!drvdata || !drvdata->csdev)
1389     return;
1390    
1391     - stm_disable(drvdata->csdev, NULL);
1392     + coresight_disable(drvdata->csdev);
1393     }
1394    
1395     static phys_addr_t
1396     diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c
1397     index 73f2f0c46e62..8f2bce213248 100644
1398     --- a/drivers/iio/pressure/mpl115.c
1399     +++ b/drivers/iio/pressure/mpl115.c
1400     @@ -137,6 +137,7 @@ static const struct iio_chan_spec mpl115_channels[] = {
1401     {
1402     .type = IIO_TEMP,
1403     .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
1404     + .info_mask_shared_by_type =
1405     BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE),
1406     },
1407     };
1408     diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
1409     index 6392d7b62841..eb87948fc559 100644
1410     --- a/drivers/iio/pressure/mpl3115.c
1411     +++ b/drivers/iio/pressure/mpl3115.c
1412     @@ -182,7 +182,7 @@ static const struct iio_chan_spec mpl3115_channels[] = {
1413     {
1414     .type = IIO_PRESSURE,
1415     .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
1416     - BIT(IIO_CHAN_INFO_SCALE),
1417     + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
1418     .scan_index = 0,
1419     .scan_type = {
1420     .sign = 'u',
1421     @@ -195,7 +195,7 @@ static const struct iio_chan_spec mpl3115_channels[] = {
1422     {
1423     .type = IIO_TEMP,
1424     .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
1425     - BIT(IIO_CHAN_INFO_SCALE),
1426     + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
1427     .scan_index = 1,
1428     .scan_type = {
1429     .sign = 's',
1430     diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
1431     index c25768c2dd3b..f2d40c05ef9e 100644
1432     --- a/drivers/infiniband/core/cma.c
1433     +++ b/drivers/infiniband/core/cma.c
1434     @@ -3540,6 +3540,9 @@ static int cma_accept_iw(struct rdma_id_private *id_priv,
1435     struct iw_cm_conn_param iw_param;
1436     int ret;
1437    
1438     + if (!conn_param)
1439     + return -EINVAL;
1440     +
1441     ret = cma_modify_qp_rtr(id_priv, conn_param);
1442     if (ret)
1443     return ret;
1444     diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1445     index d82637ab09fd..34be95ee9038 100644
1446     --- a/drivers/iommu/intel-iommu.c
1447     +++ b/drivers/iommu/intel-iommu.c
1448     @@ -3325,13 +3325,14 @@ static int __init init_dmars(void)
1449     iommu_identity_mapping |= IDENTMAP_GFX;
1450     #endif
1451    
1452     + check_tylersburg_isoch();
1453     +
1454     if (iommu_identity_mapping) {
1455     ret = si_domain_init(hw_pass_through);
1456     if (ret)
1457     goto free_iommu;
1458     }
1459    
1460     - check_tylersburg_isoch();
1461    
1462     /*
1463     * If we copied translations from a previous kernel in the kdump
1464     diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
1465     index 59b2c50562e4..c817627d09ca 100644
1466     --- a/drivers/md/dm-cache-target.c
1467     +++ b/drivers/md/dm-cache-target.c
1468     @@ -248,7 +248,7 @@ struct cache {
1469     /*
1470     * Fields for converting from sectors to blocks.
1471     */
1472     - uint32_t sectors_per_block;
1473     + sector_t sectors_per_block;
1474     int sectors_per_block_shift;
1475    
1476     spinlock_t lock;
1477     @@ -3546,11 +3546,11 @@ static void cache_status(struct dm_target *ti, status_type_t type,
1478    
1479     residency = policy_residency(cache->policy);
1480    
1481     - DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
1482     + DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ",
1483     (unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
1484     (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
1485     (unsigned long long)nr_blocks_metadata,
1486     - cache->sectors_per_block,
1487     + (unsigned long long)cache->sectors_per_block,
1488     (unsigned long long) from_cblock(residency),
1489     (unsigned long long) from_cblock(cache->cache_size),
1490     (unsigned) atomic_read(&cache->stats.read_hit),
1491     diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
1492     index af2d79b52484..15daa36fcea6 100644
1493     --- a/drivers/md/dm-raid.c
1494     +++ b/drivers/md/dm-raid.c
1495     @@ -3621,6 +3621,8 @@ static int raid_preresume(struct dm_target *ti)
1496     return r;
1497     }
1498    
1499     +#define RESUME_STAY_FROZEN_FLAGS (CTR_FLAG_DELTA_DISKS | CTR_FLAG_DATA_OFFSET)
1500     +
1501     static void raid_resume(struct dm_target *ti)
1502     {
1503     struct raid_set *rs = ti->private;
1504     @@ -3638,7 +3640,15 @@ static void raid_resume(struct dm_target *ti)
1505     mddev->ro = 0;
1506     mddev->in_sync = 0;
1507    
1508     - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
1509     + /*
1510     + * Keep the RAID set frozen if reshape/rebuild flags are set.
1511     + * The RAID set is unfrozen once the next table load/resume,
1512     + * which clears the reshape/rebuild flags, occurs.
1513     + * This ensures that the constructor for the inactive table
1514     + * retrieves an up-to-date reshape_position.
1515     + */
1516     + if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS))
1517     + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
1518    
1519     if (mddev->suspended)
1520     mddev_resume(mddev);
1521     diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
1522     index 6c25213ab38c..bdbb7e6e8212 100644
1523     --- a/drivers/md/dm-round-robin.c
1524     +++ b/drivers/md/dm-round-robin.c
1525     @@ -17,8 +17,8 @@
1526     #include <linux/module.h>
1527    
1528     #define DM_MSG_PREFIX "multipath round-robin"
1529     -#define RR_MIN_IO 1000
1530     -#define RR_VERSION "1.1.0"
1531     +#define RR_MIN_IO 1
1532     +#define RR_VERSION "1.2.0"
1533    
1534     /*-----------------------------------------------------------------
1535     * Path-handling code, paths are held in lists
1536     @@ -47,44 +47,19 @@ struct selector {
1537     struct list_head valid_paths;
1538     struct list_head invalid_paths;
1539     spinlock_t lock;
1540     - struct dm_path * __percpu *current_path;
1541     - struct percpu_counter repeat_count;
1542     };
1543    
1544     -static void set_percpu_current_path(struct selector *s, struct dm_path *path)
1545     -{
1546     - int cpu;
1547     -
1548     - for_each_possible_cpu(cpu)
1549     - *per_cpu_ptr(s->current_path, cpu) = path;
1550     -}
1551     -
1552     static struct selector *alloc_selector(void)
1553     {
1554     struct selector *s = kmalloc(sizeof(*s), GFP_KERNEL);
1555    
1556     - if (!s)
1557     - return NULL;
1558     -
1559     - INIT_LIST_HEAD(&s->valid_paths);
1560     - INIT_LIST_HEAD(&s->invalid_paths);
1561     - spin_lock_init(&s->lock);
1562     -
1563     - s->current_path = alloc_percpu(struct dm_path *);
1564     - if (!s->current_path)
1565     - goto out_current_path;
1566     - set_percpu_current_path(s, NULL);
1567     -
1568     - if (percpu_counter_init(&s->repeat_count, 0, GFP_KERNEL))
1569     - goto out_repeat_count;
1570     + if (s) {
1571     + INIT_LIST_HEAD(&s->valid_paths);
1572     + INIT_LIST_HEAD(&s->invalid_paths);
1573     + spin_lock_init(&s->lock);
1574     + }
1575    
1576     return s;
1577     -
1578     -out_repeat_count:
1579     - free_percpu(s->current_path);
1580     -out_current_path:
1581     - kfree(s);
1582     - return NULL;;
1583     }
1584    
1585     static int rr_create(struct path_selector *ps, unsigned argc, char **argv)
1586     @@ -105,8 +80,6 @@ static void rr_destroy(struct path_selector *ps)
1587    
1588     free_paths(&s->valid_paths);
1589     free_paths(&s->invalid_paths);
1590     - free_percpu(s->current_path);
1591     - percpu_counter_destroy(&s->repeat_count);
1592     kfree(s);
1593     ps->context = NULL;
1594     }
1595     @@ -157,6 +130,11 @@ static int rr_add_path(struct path_selector *ps, struct dm_path *path,
1596     return -EINVAL;
1597     }
1598    
1599     + if (repeat_count > 1) {
1600     + DMWARN_LIMIT("repeat_count > 1 is deprecated, using 1 instead");
1601     + repeat_count = 1;
1602     + }
1603     +
1604     /* allocate the path */
1605     pi = kmalloc(sizeof(*pi), GFP_KERNEL);
1606     if (!pi) {
1607     @@ -183,9 +161,6 @@ static void rr_fail_path(struct path_selector *ps, struct dm_path *p)
1608     struct path_info *pi = p->pscontext;
1609    
1610     spin_lock_irqsave(&s->lock, flags);
1611     - if (p == *this_cpu_ptr(s->current_path))
1612     - set_percpu_current_path(s, NULL);
1613     -
1614     list_move(&pi->list, &s->invalid_paths);
1615     spin_unlock_irqrestore(&s->lock, flags);
1616     }
1617     @@ -208,29 +183,15 @@ static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes)
1618     unsigned long flags;
1619     struct selector *s = ps->context;
1620     struct path_info *pi = NULL;
1621     - struct dm_path *current_path = NULL;
1622     -
1623     - local_irq_save(flags);
1624     - current_path = *this_cpu_ptr(s->current_path);
1625     - if (current_path) {
1626     - percpu_counter_dec(&s->repeat_count);
1627     - if (percpu_counter_read_positive(&s->repeat_count) > 0) {
1628     - local_irq_restore(flags);
1629     - return current_path;
1630     - }
1631     - }
1632    
1633     - spin_lock(&s->lock);
1634     + spin_lock_irqsave(&s->lock, flags);
1635     if (!list_empty(&s->valid_paths)) {
1636     pi = list_entry(s->valid_paths.next, struct path_info, list);
1637     list_move_tail(&pi->list, &s->valid_paths);
1638     - percpu_counter_set(&s->repeat_count, pi->repeat_count);
1639     - set_percpu_current_path(s, pi->path);
1640     - current_path = pi->path;
1641     }
1642     spin_unlock_irqrestore(&s->lock, flags);
1643    
1644     - return current_path;
1645     + return pi ? pi->path : NULL;
1646     }
1647    
1648     static struct path_selector_type rr_ps = {
1649     diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
1650     index 38b05f23b96c..0250e7e521ab 100644
1651     --- a/drivers/md/dm-stats.c
1652     +++ b/drivers/md/dm-stats.c
1653     @@ -175,6 +175,7 @@ static void dm_stat_free(struct rcu_head *head)
1654     int cpu;
1655     struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);
1656    
1657     + kfree(s->histogram_boundaries);
1658     kfree(s->program_id);
1659     kfree(s->aux_data);
1660     for_each_possible_cpu(cpu) {
1661     diff --git a/drivers/md/linear.c b/drivers/md/linear.c
1662     index 86f5d435901d..b0c0aef92a37 100644
1663     --- a/drivers/md/linear.c
1664     +++ b/drivers/md/linear.c
1665     @@ -52,18 +52,26 @@ static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
1666     return conf->disks + lo;
1667     }
1668    
1669     +/*
1670     + * In linear_congested() conf->raid_disks is used as a copy of
1671     + * mddev->raid_disks to iterate conf->disks[], because conf->raid_disks
1672     + * and conf->disks[] are created in linear_conf(), they are always
1673     + * consitent with each other, but mddev->raid_disks does not.
1674     + */
1675     static int linear_congested(struct mddev *mddev, int bits)
1676     {
1677     struct linear_conf *conf;
1678     int i, ret = 0;
1679    
1680     - conf = mddev->private;
1681     + rcu_read_lock();
1682     + conf = rcu_dereference(mddev->private);
1683    
1684     - for (i = 0; i < mddev->raid_disks && !ret ; i++) {
1685     + for (i = 0; i < conf->raid_disks && !ret ; i++) {
1686     struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
1687     ret |= bdi_congested(&q->backing_dev_info, bits);
1688     }
1689    
1690     + rcu_read_unlock();
1691     return ret;
1692     }
1693    
1694     @@ -143,6 +151,19 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
1695     conf->disks[i-1].end_sector +
1696     conf->disks[i].rdev->sectors;
1697    
1698     + /*
1699     + * conf->raid_disks is copy of mddev->raid_disks. The reason to
1700     + * keep a copy of mddev->raid_disks in struct linear_conf is,
1701     + * mddev->raid_disks may not be consistent with pointers number of
1702     + * conf->disks[] when it is updated in linear_add() and used to
1703     + * iterate old conf->disks[] earray in linear_congested().
1704     + * Here conf->raid_disks is always consitent with number of
1705     + * pointers in conf->disks[] array, and mddev->private is updated
1706     + * with rcu_assign_pointer() in linear_addr(), such race can be
1707     + * avoided.
1708     + */
1709     + conf->raid_disks = raid_disks;
1710     +
1711     return conf;
1712    
1713     out:
1714     @@ -195,15 +216,23 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
1715     if (!newconf)
1716     return -ENOMEM;
1717    
1718     + /* newconf->raid_disks already keeps a copy of * the increased
1719     + * value of mddev->raid_disks, WARN_ONCE() is just used to make
1720     + * sure of this. It is possible that oldconf is still referenced
1721     + * in linear_congested(), therefore kfree_rcu() is used to free
1722     + * oldconf until no one uses it anymore.
1723     + */
1724     mddev_suspend(mddev);
1725     - oldconf = mddev->private;
1726     + oldconf = rcu_dereference(mddev->private);
1727     mddev->raid_disks++;
1728     - mddev->private = newconf;
1729     + WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
1730     + "copied raid_disks doesn't match mddev->raid_disks");
1731     + rcu_assign_pointer(mddev->private, newconf);
1732     md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
1733     set_capacity(mddev->gendisk, mddev->array_sectors);
1734     mddev_resume(mddev);
1735     revalidate_disk(mddev->gendisk);
1736     - kfree(oldconf);
1737     + kfree_rcu(oldconf, rcu);
1738     return 0;
1739     }
1740    
1741     diff --git a/drivers/md/linear.h b/drivers/md/linear.h
1742     index b685ddd7d7f7..8d392e6098b3 100644
1743     --- a/drivers/md/linear.h
1744     +++ b/drivers/md/linear.h
1745     @@ -10,6 +10,7 @@ struct linear_conf
1746     {
1747     struct rcu_head rcu;
1748     sector_t array_sectors;
1749     + int raid_disks; /* a copy of mddev->raid_disks */
1750     struct dev_info disks[0];
1751     };
1752     #endif
1753     diff --git a/drivers/media/dvb-frontends/cxd2820r_core.c b/drivers/media/dvb-frontends/cxd2820r_core.c
1754     index 95267c6edb3a..f6ebbb47b9b2 100644
1755     --- a/drivers/media/dvb-frontends/cxd2820r_core.c
1756     +++ b/drivers/media/dvb-frontends/cxd2820r_core.c
1757     @@ -615,6 +615,7 @@ static int cxd2820r_probe(struct i2c_client *client,
1758     }
1759    
1760     priv->client[0] = client;
1761     + priv->fe.demodulator_priv = priv;
1762     priv->i2c = client->adapter;
1763     priv->ts_mode = pdata->ts_mode;
1764     priv->ts_clk_inv = pdata->ts_clk_inv;
1765     @@ -697,7 +698,6 @@ static int cxd2820r_probe(struct i2c_client *client,
1766     memcpy(&priv->fe.ops, &cxd2820r_ops, sizeof(priv->fe.ops));
1767     if (!pdata->attach_in_use)
1768     priv->fe.ops.release = NULL;
1769     - priv->fe.demodulator_priv = priv;
1770     i2c_set_clientdata(client, priv);
1771    
1772     /* Setup callbacks */
1773     diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
1774     index 2783531f9fc0..4462d8c69d57 100644
1775     --- a/drivers/media/media-device.c
1776     +++ b/drivers/media/media-device.c
1777     @@ -130,7 +130,7 @@ static long media_device_enum_entities(struct media_device *mdev,
1778     * old range.
1779     */
1780     if (ent->function < MEDIA_ENT_F_OLD_BASE ||
1781     - ent->function > MEDIA_ENT_T_DEVNODE_UNKNOWN) {
1782     + ent->function > MEDIA_ENT_F_TUNER) {
1783     if (is_media_entity_v4l2_subdev(ent))
1784     entd->type = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
1785     else if (ent->function != MEDIA_ENT_F_IO_V4L)
1786     diff --git a/drivers/media/pci/dm1105/Kconfig b/drivers/media/pci/dm1105/Kconfig
1787     index 173daf0c0847..14fa7e40f2a6 100644
1788     --- a/drivers/media/pci/dm1105/Kconfig
1789     +++ b/drivers/media/pci/dm1105/Kconfig
1790     @@ -1,6 +1,6 @@
1791     config DVB_DM1105
1792     tristate "SDMC DM1105 based PCI cards"
1793     - depends on DVB_CORE && PCI && I2C
1794     + depends on DVB_CORE && PCI && I2C && I2C_ALGOBIT
1795     select DVB_PLL if MEDIA_SUBDRV_AUTOSELECT
1796     select DVB_STV0299 if MEDIA_SUBDRV_AUTOSELECT
1797     select DVB_STV0288 if MEDIA_SUBDRV_AUTOSELECT
1798     diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
1799     index b33b9e35e60e..05489a401c5c 100644
1800     --- a/drivers/media/platform/am437x/am437x-vpfe.c
1801     +++ b/drivers/media/platform/am437x/am437x-vpfe.c
1802     @@ -1576,7 +1576,7 @@ static int vpfe_s_fmt(struct file *file, void *priv,
1803     return -EBUSY;
1804     }
1805    
1806     - ret = vpfe_try_fmt(file, priv, &format);
1807     + ret = __vpfe_get_format(vpfe, &format, &bpp);
1808     if (ret)
1809     return ret;
1810    
1811     diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
1812     index 91f9bb87ce68..6ebe89551961 100644
1813     --- a/drivers/media/rc/lirc_dev.c
1814     +++ b/drivers/media/rc/lirc_dev.c
1815     @@ -589,7 +589,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1816     result = put_user(ir->d.features, (__u32 __user *)arg);
1817     break;
1818     case LIRC_GET_REC_MODE:
1819     - if (LIRC_CAN_REC(ir->d.features)) {
1820     + if (!LIRC_CAN_REC(ir->d.features)) {
1821     result = -ENOTTY;
1822     break;
1823     }
1824     @@ -599,7 +599,7 @@ long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1825     (__u32 __user *)arg);
1826     break;
1827     case LIRC_SET_REC_MODE:
1828     - if (LIRC_CAN_REC(ir->d.features)) {
1829     + if (!LIRC_CAN_REC(ir->d.features)) {
1830     result = -ENOTTY;
1831     break;
1832     }
1833     diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
1834     index 77edd206d345..40e5a6b54955 100644
1835     --- a/drivers/media/usb/uvc/uvc_queue.c
1836     +++ b/drivers/media/usb/uvc/uvc_queue.c
1837     @@ -412,7 +412,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
1838     nextbuf = NULL;
1839     spin_unlock_irqrestore(&queue->irqlock, flags);
1840    
1841     - buf->state = buf->error ? VB2_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
1842     + buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
1843     vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
1844     vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
1845    
1846     diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
1847     index fa50635512e8..41f318631c6d 100644
1848     --- a/drivers/misc/mei/main.c
1849     +++ b/drivers/misc/mei/main.c
1850     @@ -182,32 +182,36 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
1851     goto out;
1852     }
1853    
1854     - if (rets == -EBUSY &&
1855     - !mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, file)) {
1856     - rets = -ENOMEM;
1857     - goto out;
1858     - }
1859    
1860     - do {
1861     - mutex_unlock(&dev->device_lock);
1862     -
1863     - if (wait_event_interruptible(cl->rx_wait,
1864     - (!list_empty(&cl->rd_completed)) ||
1865     - (!mei_cl_is_connected(cl)))) {
1866     +again:
1867     + mutex_unlock(&dev->device_lock);
1868     + if (wait_event_interruptible(cl->rx_wait,
1869     + !list_empty(&cl->rd_completed) ||
1870     + !mei_cl_is_connected(cl))) {
1871     + if (signal_pending(current))
1872     + return -EINTR;
1873     + return -ERESTARTSYS;
1874     + }
1875     + mutex_lock(&dev->device_lock);
1876    
1877     - if (signal_pending(current))
1878     - return -EINTR;
1879     - return -ERESTARTSYS;
1880     - }
1881     + if (!mei_cl_is_connected(cl)) {
1882     + rets = -ENODEV;
1883     + goto out;
1884     + }
1885    
1886     - mutex_lock(&dev->device_lock);
1887     - if (!mei_cl_is_connected(cl)) {
1888     - rets = -ENODEV;
1889     - goto out;
1890     - }
1891     + cb = mei_cl_read_cb(cl, file);
1892     + if (!cb) {
1893     + /*
1894     + * For amthif all the waiters are woken up,
1895     + * but only fp with matching cb->fp get the cb,
1896     + * the others have to return to wait on read.
1897     + */
1898     + if (cl == &dev->iamthif_cl)
1899     + goto again;
1900    
1901     - cb = mei_cl_read_cb(cl, file);
1902     - } while (!cb);
1903     + rets = 0;
1904     + goto out;
1905     + }
1906    
1907     copy_buffer:
1908     /* now copy the data to user space */
1909     diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
1910     index fddd0be196f4..80918abfc468 100644
1911     --- a/drivers/mmc/host/sdhci-acpi.c
1912     +++ b/drivers/mmc/host/sdhci-acpi.c
1913     @@ -466,7 +466,10 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
1914     if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
1915     bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);
1916    
1917     - if (mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL)) {
1918     + err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL);
1919     + if (err) {
1920     + if (err == -EPROBE_DEFER)
1921     + goto err_free;
1922     dev_warn(dev, "failed to setup card detect gpio\n");
1923     c->use_runtime_pm = false;
1924     }
1925     diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
1926     index 0a177b1bfe3e..d1570f512f0b 100644
1927     --- a/drivers/mtd/nand/fsl_ifc_nand.c
1928     +++ b/drivers/mtd/nand/fsl_ifc_nand.c
1929     @@ -258,9 +258,15 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
1930     int bufnum = nctrl->page & priv->bufnum_mask;
1931     int sector = bufnum * chip->ecc.steps;
1932     int sector_end = sector + chip->ecc.steps - 1;
1933     + __be32 *eccstat_regs;
1934     +
1935     + if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
1936     + eccstat_regs = ifc->ifc_nand.v2_nand_eccstat;
1937     + else
1938     + eccstat_regs = ifc->ifc_nand.v1_nand_eccstat;
1939    
1940     for (i = sector / 4; i <= sector_end / 4; i++)
1941     - eccstat[i] = ifc_in32(&ifc->ifc_nand.nand_eccstat[i]);
1942     + eccstat[i] = ifc_in32(&eccstat_regs[i]);
1943    
1944     for (i = sector; i <= sector_end; i++) {
1945     errors = check_read_ecc(mtd, ctrl, eccstat, i);
1946     diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
1947     index 77e3cc06a30c..a0dabd4038ba 100644
1948     --- a/drivers/net/can/usb/gs_usb.c
1949     +++ b/drivers/net/can/usb/gs_usb.c
1950     @@ -908,10 +908,14 @@ static int gs_usb_probe(struct usb_interface *intf,
1951     struct gs_usb *dev;
1952     int rc = -ENOMEM;
1953     unsigned int icount, i;
1954     - struct gs_host_config hconf = {
1955     - .byte_order = 0x0000beef,
1956     - };
1957     - struct gs_device_config dconf;
1958     + struct gs_host_config *hconf;
1959     + struct gs_device_config *dconf;
1960     +
1961     + hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
1962     + if (!hconf)
1963     + return -ENOMEM;
1964     +
1965     + hconf->byte_order = 0x0000beef;
1966    
1967     /* send host config */
1968     rc = usb_control_msg(interface_to_usbdev(intf),
1969     @@ -920,16 +924,22 @@ static int gs_usb_probe(struct usb_interface *intf,
1970     USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
1971     1,
1972     intf->altsetting[0].desc.bInterfaceNumber,
1973     - &hconf,
1974     - sizeof(hconf),
1975     + hconf,
1976     + sizeof(*hconf),
1977     1000);
1978    
1979     + kfree(hconf);
1980     +
1981     if (rc < 0) {
1982     dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
1983     rc);
1984     return rc;
1985     }
1986    
1987     + dconf = kmalloc(sizeof(*dconf), GFP_KERNEL);
1988     + if (!dconf)
1989     + return -ENOMEM;
1990     +
1991     /* read device config */
1992     rc = usb_control_msg(interface_to_usbdev(intf),
1993     usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
1994     @@ -937,28 +947,33 @@ static int gs_usb_probe(struct usb_interface *intf,
1995     USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
1996     1,
1997     intf->altsetting[0].desc.bInterfaceNumber,
1998     - &dconf,
1999     - sizeof(dconf),
2000     + dconf,
2001     + sizeof(*dconf),
2002     1000);
2003     if (rc < 0) {
2004     dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
2005     rc);
2006     + kfree(dconf);
2007     return rc;
2008     }
2009    
2010     - icount = dconf.icount + 1;
2011     + icount = dconf->icount + 1;
2012     dev_info(&intf->dev, "Configuring for %d interfaces\n", icount);
2013    
2014     if (icount > GS_MAX_INTF) {
2015     dev_err(&intf->dev,
2016     "Driver cannot handle more that %d CAN interfaces\n",
2017     GS_MAX_INTF);
2018     + kfree(dconf);
2019     return -EINVAL;
2020     }
2021    
2022     dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2023     - if (!dev)
2024     + if (!dev) {
2025     + kfree(dconf);
2026     return -ENOMEM;
2027     + }
2028     +
2029     init_usb_anchor(&dev->rx_submitted);
2030    
2031     atomic_set(&dev->active_channels, 0);
2032     @@ -967,7 +982,7 @@ static int gs_usb_probe(struct usb_interface *intf,
2033     dev->udev = interface_to_usbdev(intf);
2034    
2035     for (i = 0; i < icount; i++) {
2036     - dev->canch[i] = gs_make_candev(i, intf, &dconf);
2037     + dev->canch[i] = gs_make_candev(i, intf, dconf);
2038     if (IS_ERR_OR_NULL(dev->canch[i])) {
2039     /* save error code to return later */
2040     rc = PTR_ERR(dev->canch[i]);
2041     @@ -978,12 +993,15 @@ static int gs_usb_probe(struct usb_interface *intf,
2042     gs_destroy_candev(dev->canch[i]);
2043    
2044     usb_kill_anchored_urbs(&dev->rx_submitted);
2045     + kfree(dconf);
2046     kfree(dev);
2047     return rc;
2048     }
2049     dev->canch[i]->parent = dev;
2050     }
2051    
2052     + kfree(dconf);
2053     +
2054     return 0;
2055     }
2056    
2057     diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
2058     index 108a30e15097..d000cb62d6ae 100644
2059     --- a/drivers/net/can/usb/usb_8dev.c
2060     +++ b/drivers/net/can/usb/usb_8dev.c
2061     @@ -951,8 +951,8 @@ static int usb_8dev_probe(struct usb_interface *intf,
2062     for (i = 0; i < MAX_TX_URBS; i++)
2063     priv->tx_contexts[i].echo_index = MAX_TX_URBS;
2064    
2065     - priv->cmd_msg_buffer = kzalloc(sizeof(struct usb_8dev_cmd_msg),
2066     - GFP_KERNEL);
2067     + priv->cmd_msg_buffer = devm_kzalloc(&intf->dev, sizeof(struct usb_8dev_cmd_msg),
2068     + GFP_KERNEL);
2069     if (!priv->cmd_msg_buffer)
2070     goto cleanup_candev;
2071    
2072     @@ -966,7 +966,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
2073     if (err) {
2074     netdev_err(netdev,
2075     "couldn't register CAN device: %d\n", err);
2076     - goto cleanup_cmd_msg_buffer;
2077     + goto cleanup_candev;
2078     }
2079    
2080     err = usb_8dev_cmd_version(priv, &version);
2081     @@ -987,9 +987,6 @@ static int usb_8dev_probe(struct usb_interface *intf,
2082     cleanup_unregister_candev:
2083     unregister_netdev(priv->netdev);
2084    
2085     -cleanup_cmd_msg_buffer:
2086     - kfree(priv->cmd_msg_buffer);
2087     -
2088     cleanup_candev:
2089     free_candev(netdev);
2090    
2091     diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
2092     index 0c4532227f25..972b5e224d5d 100644
2093     --- a/drivers/net/wireless/ath/ath10k/core.c
2094     +++ b/drivers/net/wireless/ath/ath10k/core.c
2095     @@ -1901,7 +1901,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
2096     ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
2097     ar->hw->wiphy->fw_version);
2098    
2099     - if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map)) {
2100     + if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map) &&
2101     + mode == ATH10K_FIRMWARE_MODE_NORMAL) {
2102     val = 0;
2103     if (ath10k_peer_stats_enabled(ar))
2104     val = WMI_10_4_PEER_STATS;
2105     @@ -1954,10 +1955,13 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
2106     * possible to implicitly make it correct by creating a dummy vdev and
2107     * then deleting it.
2108     */
2109     - status = ath10k_core_reset_rx_filter(ar);
2110     - if (status) {
2111     - ath10k_err(ar, "failed to reset rx filter: %d\n", status);
2112     - goto err_hif_stop;
2113     + if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
2114     + status = ath10k_core_reset_rx_filter(ar);
2115     + if (status) {
2116     + ath10k_err(ar,
2117     + "failed to reset rx filter: %d\n", status);
2118     + goto err_hif_stop;
2119     + }
2120     }
2121    
2122     /* If firmware indicates Full Rx Reorder support it must be used in a
2123     diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
2124     index dc44cfef7517..16e052d02c94 100644
2125     --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
2126     +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
2127     @@ -502,8 +502,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2128     break;
2129     return -EOPNOTSUPP;
2130     default:
2131     - WARN_ON(1);
2132     - return -EINVAL;
2133     + return -EOPNOTSUPP;
2134     }
2135    
2136     mutex_lock(&ah->lock);
2137     diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
2138     index 107bcfbbe0fb..cb37bf01920e 100644
2139     --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
2140     +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
2141     @@ -73,13 +73,13 @@
2142     #define AR9300_OTP_BASE \
2143     ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000)
2144     #define AR9300_OTP_STATUS \
2145     - ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30018 : 0x15f18)
2146     + ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x31018 : 0x15f18)
2147     #define AR9300_OTP_STATUS_TYPE 0x7
2148     #define AR9300_OTP_STATUS_VALID 0x4
2149     #define AR9300_OTP_STATUS_ACCESS_BUSY 0x2
2150     #define AR9300_OTP_STATUS_SM_BUSY 0x1
2151     #define AR9300_OTP_READ_DATA \
2152     - ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3001c : 0x15f1c)
2153     + ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3101c : 0x15f1c)
2154    
2155     enum targetPowerHTRates {
2156     HT_TARGET_RATE_0_8_16,
2157     diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
2158     index 26fc8ecfe8c4..a7316710a902 100644
2159     --- a/drivers/net/wireless/ath/ath9k/ath9k.h
2160     +++ b/drivers/net/wireless/ath/ath9k/ath9k.h
2161     @@ -959,6 +959,7 @@ struct ath_softc {
2162     struct survey_info *cur_survey;
2163     struct survey_info survey[ATH9K_NUM_CHANNELS];
2164    
2165     + spinlock_t intr_lock;
2166     struct tasklet_struct intr_tq;
2167     struct tasklet_struct bcon_tasklet;
2168     struct ath_hw *sc_ah;
2169     diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
2170     index cfa3fe82ade3..297d4bbc5c05 100644
2171     --- a/drivers/net/wireless/ath/ath9k/init.c
2172     +++ b/drivers/net/wireless/ath/ath9k/init.c
2173     @@ -626,6 +626,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
2174     common->bt_ant_diversity = 1;
2175    
2176     spin_lock_init(&common->cc_lock);
2177     + spin_lock_init(&sc->intr_lock);
2178     spin_lock_init(&sc->sc_serial_rw);
2179     spin_lock_init(&sc->sc_pm_lock);
2180     spin_lock_init(&sc->chan_lock);
2181     diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
2182     index bba85d1a6cd1..d937c39b3a0b 100644
2183     --- a/drivers/net/wireless/ath/ath9k/mac.c
2184     +++ b/drivers/net/wireless/ath/ath9k/mac.c
2185     @@ -805,21 +805,12 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah)
2186     }
2187     EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
2188    
2189     -void ath9k_hw_enable_interrupts(struct ath_hw *ah)
2190     +static void __ath9k_hw_enable_interrupts(struct ath_hw *ah)
2191     {
2192     struct ath_common *common = ath9k_hw_common(ah);
2193     u32 sync_default = AR_INTR_SYNC_DEFAULT;
2194     u32 async_mask;
2195    
2196     - if (!(ah->imask & ATH9K_INT_GLOBAL))
2197     - return;
2198     -
2199     - if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
2200     - ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
2201     - atomic_read(&ah->intr_ref_cnt));
2202     - return;
2203     - }
2204     -
2205     if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
2206     AR_SREV_9561(ah))
2207     sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
2208     @@ -841,6 +832,39 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
2209     ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
2210     REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
2211     }
2212     +
2213     +void ath9k_hw_resume_interrupts(struct ath_hw *ah)
2214     +{
2215     + struct ath_common *common = ath9k_hw_common(ah);
2216     +
2217     + if (!(ah->imask & ATH9K_INT_GLOBAL))
2218     + return;
2219     +
2220     + if (atomic_read(&ah->intr_ref_cnt) != 0) {
2221     + ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
2222     + atomic_read(&ah->intr_ref_cnt));
2223     + return;
2224     + }
2225     +
2226     + __ath9k_hw_enable_interrupts(ah);
2227     +}
2228     +EXPORT_SYMBOL(ath9k_hw_resume_interrupts);
2229     +
2230     +void ath9k_hw_enable_interrupts(struct ath_hw *ah)
2231     +{
2232     + struct ath_common *common = ath9k_hw_common(ah);
2233     +
2234     + if (!(ah->imask & ATH9K_INT_GLOBAL))
2235     + return;
2236     +
2237     + if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
2238     + ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
2239     + atomic_read(&ah->intr_ref_cnt));
2240     + return;
2241     + }
2242     +
2243     + __ath9k_hw_enable_interrupts(ah);
2244     +}
2245     EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
2246    
2247     void ath9k_hw_set_interrupts(struct ath_hw *ah)
2248     diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
2249     index 3bab01435a86..770fc11b41d1 100644
2250     --- a/drivers/net/wireless/ath/ath9k/mac.h
2251     +++ b/drivers/net/wireless/ath/ath9k/mac.h
2252     @@ -744,6 +744,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah);
2253     void ath9k_hw_enable_interrupts(struct ath_hw *ah);
2254     void ath9k_hw_disable_interrupts(struct ath_hw *ah);
2255     void ath9k_hw_kill_interrupts(struct ath_hw *ah);
2256     +void ath9k_hw_resume_interrupts(struct ath_hw *ah);
2257    
2258     void ar9002_hw_attach_mac_ops(struct ath_hw *ah);
2259    
2260     diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
2261     index e9f32b52fc8c..b868f02ced89 100644
2262     --- a/drivers/net/wireless/ath/ath9k/main.c
2263     +++ b/drivers/net/wireless/ath/ath9k/main.c
2264     @@ -373,21 +373,20 @@ void ath9k_tasklet(unsigned long data)
2265     struct ath_common *common = ath9k_hw_common(ah);
2266     enum ath_reset_type type;
2267     unsigned long flags;
2268     - u32 status = sc->intrstatus;
2269     + u32 status;
2270     u32 rxmask;
2271    
2272     + spin_lock_irqsave(&sc->intr_lock, flags);
2273     + status = sc->intrstatus;
2274     + sc->intrstatus = 0;
2275     + spin_unlock_irqrestore(&sc->intr_lock, flags);
2276     +
2277     ath9k_ps_wakeup(sc);
2278     spin_lock(&sc->sc_pcu_lock);
2279    
2280     if (status & ATH9K_INT_FATAL) {
2281     type = RESET_TYPE_FATAL_INT;
2282     ath9k_queue_reset(sc, type);
2283     -
2284     - /*
2285     - * Increment the ref. counter here so that
2286     - * interrupts are enabled in the reset routine.
2287     - */
2288     - atomic_inc(&ah->intr_ref_cnt);
2289     ath_dbg(common, RESET, "FATAL: Skipping interrupts\n");
2290     goto out;
2291     }
2292     @@ -403,11 +402,6 @@ void ath9k_tasklet(unsigned long data)
2293     type = RESET_TYPE_BB_WATCHDOG;
2294     ath9k_queue_reset(sc, type);
2295    
2296     - /*
2297     - * Increment the ref. counter here so that
2298     - * interrupts are enabled in the reset routine.
2299     - */
2300     - atomic_inc(&ah->intr_ref_cnt);
2301     ath_dbg(common, RESET,
2302     "BB_WATCHDOG: Skipping interrupts\n");
2303     goto out;
2304     @@ -420,7 +414,6 @@ void ath9k_tasklet(unsigned long data)
2305     if ((sc->gtt_cnt >= MAX_GTT_CNT) && !ath9k_hw_check_alive(ah)) {
2306     type = RESET_TYPE_TX_GTT;
2307     ath9k_queue_reset(sc, type);
2308     - atomic_inc(&ah->intr_ref_cnt);
2309     ath_dbg(common, RESET,
2310     "GTT: Skipping interrupts\n");
2311     goto out;
2312     @@ -477,7 +470,7 @@ void ath9k_tasklet(unsigned long data)
2313     ath9k_btcoex_handle_interrupt(sc, status);
2314    
2315     /* re-enable hardware interrupt */
2316     - ath9k_hw_enable_interrupts(ah);
2317     + ath9k_hw_resume_interrupts(ah);
2318     out:
2319     spin_unlock(&sc->sc_pcu_lock);
2320     ath9k_ps_restore(sc);
2321     @@ -541,7 +534,9 @@ irqreturn_t ath_isr(int irq, void *dev)
2322     return IRQ_NONE;
2323    
2324     /* Cache the status */
2325     - sc->intrstatus = status;
2326     + spin_lock(&sc->intr_lock);
2327     + sc->intrstatus |= status;
2328     + spin_unlock(&sc->intr_lock);
2329    
2330     if (status & SCHED_INTR)
2331     sched = true;
2332     @@ -587,7 +582,7 @@ irqreturn_t ath_isr(int irq, void *dev)
2333    
2334     if (sched) {
2335     /* turn off every interrupt */
2336     - ath9k_hw_disable_interrupts(ah);
2337     + ath9k_hw_kill_interrupts(ah);
2338     tasklet_schedule(&sc->intr_tq);
2339     }
2340    
2341     diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
2342     index b951ebac15ea..d2f4dd470fdb 100644
2343     --- a/drivers/net/wireless/realtek/rtlwifi/pci.h
2344     +++ b/drivers/net/wireless/realtek/rtlwifi/pci.h
2345     @@ -275,10 +275,10 @@ struct mp_adapter {
2346     };
2347    
2348     struct rtl_pci_priv {
2349     + struct bt_coexist_info bt_coexist;
2350     + struct rtl_led_ctl ledctl;
2351     struct rtl_pci dev;
2352     struct mp_adapter ndis_adapter;
2353     - struct rtl_led_ctl ledctl;
2354     - struct bt_coexist_info bt_coexist;
2355     };
2356    
2357     #define rtl_pcipriv(hw) (((struct rtl_pci_priv *)(rtl_priv(hw))->priv))
2358     diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
2359     index ebf663e1a81a..cab4601eba8e 100644
2360     --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
2361     +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
2362     @@ -1006,7 +1006,7 @@ static void _rtl92ee_hw_configure(struct ieee80211_hw *hw)
2363     rtl_write_word(rtlpriv, REG_SIFS_TRX, 0x100a);
2364    
2365     /* Note Data sheet don't define */
2366     - rtl_write_word(rtlpriv, 0x4C7, 0x80);
2367     + rtl_write_byte(rtlpriv, 0x4C7, 0x80);
2368    
2369     rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x20);
2370    
2371     diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
2372     index 1281ebe0c30a..2cbef9647acc 100644
2373     --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
2374     +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
2375     @@ -1128,7 +1128,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
2376     }
2377     if (0 == tmp) {
2378     read_addr = REG_DBI_RDATA + addr % 4;
2379     - ret = rtl_read_word(rtlpriv, read_addr);
2380     + ret = rtl_read_byte(rtlpriv, read_addr);
2381     }
2382     return ret;
2383     }
2384     diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h
2385     index 685273ca9561..441c4412130c 100644
2386     --- a/drivers/net/wireless/realtek/rtlwifi/usb.h
2387     +++ b/drivers/net/wireless/realtek/rtlwifi/usb.h
2388     @@ -150,8 +150,9 @@ struct rtl_usb {
2389     };
2390    
2391     struct rtl_usb_priv {
2392     - struct rtl_usb dev;
2393     + struct bt_coexist_info bt_coexist;
2394     struct rtl_led_ctl ledctl;
2395     + struct rtl_usb dev;
2396     };
2397    
2398     #define rtl_usbpriv(hw) (((struct rtl_usb_priv *)(rtl_priv(hw))->priv))
2399     diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
2400     index 763ff8745828..61fc349c96d4 100644
2401     --- a/drivers/pci/host/pci-hyperv.c
2402     +++ b/drivers/pci/host/pci-hyperv.c
2403     @@ -130,7 +130,8 @@ union pci_version {
2404     */
2405     union win_slot_encoding {
2406     struct {
2407     - u32 func:8;
2408     + u32 dev:5;
2409     + u32 func:3;
2410     u32 reserved:24;
2411     } bits;
2412     u32 slot;
2413     @@ -483,7 +484,8 @@ static u32 devfn_to_wslot(int devfn)
2414     union win_slot_encoding wslot;
2415    
2416     wslot.slot = 0;
2417     - wslot.bits.func = PCI_SLOT(devfn) | (PCI_FUNC(devfn) << 5);
2418     + wslot.bits.dev = PCI_SLOT(devfn);
2419     + wslot.bits.func = PCI_FUNC(devfn);
2420    
2421     return wslot.slot;
2422     }
2423     @@ -501,7 +503,7 @@ static int wslot_to_devfn(u32 wslot)
2424     union win_slot_encoding slot_no;
2425    
2426     slot_no.slot = wslot;
2427     - return PCI_DEVFN(0, slot_no.bits.func);
2428     + return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
2429     }
2430    
2431     /*
2432     diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
2433     index b0ac4dfafa0b..f2907e7adb5d 100644
2434     --- a/drivers/pci/host/pcie-altera.c
2435     +++ b/drivers/pci/host/pcie-altera.c
2436     @@ -57,10 +57,14 @@
2437     #define TLP_WRITE_TAG 0x10
2438     #define RP_DEVFN 0
2439     #define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
2440     -#define TLP_CFG_DW0(pcie, bus) \
2441     +#define TLP_CFGRD_DW0(pcie, bus) \
2442     ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGRD0 \
2443     : TLP_FMTTYPE_CFGRD1) << 24) | \
2444     TLP_PAYLOAD_SIZE)
2445     +#define TLP_CFGWR_DW0(pcie, bus) \
2446     + ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGWR0 \
2447     + : TLP_FMTTYPE_CFGWR1) << 24) | \
2448     + TLP_PAYLOAD_SIZE)
2449     #define TLP_CFG_DW1(pcie, tag, be) \
2450     (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
2451     #define TLP_CFG_DW2(bus, devfn, offset) \
2452     @@ -222,7 +226,7 @@ static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn,
2453     {
2454     u32 headers[TLP_HDR_SIZE];
2455    
2456     - headers[0] = TLP_CFG_DW0(pcie, bus);
2457     + headers[0] = TLP_CFGRD_DW0(pcie, bus);
2458     headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en);
2459     headers[2] = TLP_CFG_DW2(bus, devfn, where);
2460    
2461     @@ -237,7 +241,7 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
2462     u32 headers[TLP_HDR_SIZE];
2463     int ret;
2464    
2465     - headers[0] = TLP_CFG_DW0(pcie, bus);
2466     + headers[0] = TLP_CFGWR_DW0(pcie, bus);
2467     headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en);
2468     headers[2] = TLP_CFG_DW2(bus, devfn, where);
2469    
2470     diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
2471     index 56efaf72d08e..acb2be0c8c2c 100644
2472     --- a/drivers/pci/hotplug/pnv_php.c
2473     +++ b/drivers/pci/hotplug/pnv_php.c
2474     @@ -35,9 +35,11 @@ static void pnv_php_register(struct device_node *dn);
2475     static void pnv_php_unregister_one(struct device_node *dn);
2476     static void pnv_php_unregister(struct device_node *dn);
2477    
2478     -static void pnv_php_disable_irq(struct pnv_php_slot *php_slot)
2479     +static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
2480     + bool disable_device)
2481     {
2482     struct pci_dev *pdev = php_slot->pdev;
2483     + int irq = php_slot->irq;
2484     u16 ctrl;
2485    
2486     if (php_slot->irq > 0) {
2487     @@ -56,10 +58,14 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot)
2488     php_slot->wq = NULL;
2489     }
2490    
2491     - if (pdev->msix_enabled)
2492     - pci_disable_msix(pdev);
2493     - else if (pdev->msi_enabled)
2494     - pci_disable_msi(pdev);
2495     + if (disable_device || irq > 0) {
2496     + if (pdev->msix_enabled)
2497     + pci_disable_msix(pdev);
2498     + else if (pdev->msi_enabled)
2499     + pci_disable_msi(pdev);
2500     +
2501     + pci_disable_device(pdev);
2502     + }
2503     }
2504    
2505     static void pnv_php_free_slot(struct kref *kref)
2506     @@ -68,7 +74,7 @@ static void pnv_php_free_slot(struct kref *kref)
2507     struct pnv_php_slot, kref);
2508    
2509     WARN_ON(!list_empty(&php_slot->children));
2510     - pnv_php_disable_irq(php_slot);
2511     + pnv_php_disable_irq(php_slot, false);
2512     kfree(php_slot->name);
2513     kfree(php_slot);
2514     }
2515     @@ -759,7 +765,7 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
2516     php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name);
2517     if (!php_slot->wq) {
2518     dev_warn(&pdev->dev, "Cannot alloc workqueue\n");
2519     - pnv_php_disable_irq(php_slot);
2520     + pnv_php_disable_irq(php_slot, true);
2521     return;
2522     }
2523    
2524     @@ -772,7 +778,7 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
2525     ret = request_irq(irq, pnv_php_interrupt, IRQF_SHARED,
2526     php_slot->name, php_slot);
2527     if (ret) {
2528     - pnv_php_disable_irq(php_slot);
2529     + pnv_php_disable_irq(php_slot, true);
2530     dev_warn(&pdev->dev, "Error %d enabling IRQ %d\n", ret, irq);
2531     return;
2532     }
2533     diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
2534     index c74c3f67b8da..02e46bbcf45d 100644
2535     --- a/drivers/power/reset/Kconfig
2536     +++ b/drivers/power/reset/Kconfig
2537     @@ -32,7 +32,7 @@ config POWER_RESET_AT91_RESET
2538    
2539     config POWER_RESET_AT91_SAMA5D2_SHDWC
2540     tristate "Atmel AT91 SAMA5D2-Compatible shutdown controller driver"
2541     - depends on ARCH_AT91 || COMPILE_TEST
2542     + depends on ARCH_AT91
2543     default SOC_SAMA5
2544     help
2545     This driver supports the alternate shutdown controller for some Atmel
2546     diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c
2547     index e9e24df35f26..2579f025b90b 100644
2548     --- a/drivers/power/reset/at91-poweroff.c
2549     +++ b/drivers/power/reset/at91-poweroff.c
2550     @@ -14,9 +14,12 @@
2551     #include <linux/io.h>
2552     #include <linux/module.h>
2553     #include <linux/of.h>
2554     +#include <linux/of_address.h>
2555     #include <linux/platform_device.h>
2556     #include <linux/printk.h>
2557    
2558     +#include <soc/at91/at91sam9_ddrsdr.h>
2559     +
2560     #define AT91_SHDW_CR 0x00 /* Shut Down Control Register */
2561     #define AT91_SHDW_SHDW BIT(0) /* Shut Down command */
2562     #define AT91_SHDW_KEY (0xa5 << 24) /* KEY Password */
2563     @@ -50,6 +53,7 @@ static const char *shdwc_wakeup_modes[] = {
2564    
2565     static void __iomem *at91_shdwc_base;
2566     static struct clk *sclk;
2567     +static void __iomem *mpddrc_base;
2568    
2569     static void __init at91_wakeup_status(void)
2570     {
2571     @@ -73,6 +77,29 @@ static void at91_poweroff(void)
2572     writel(AT91_SHDW_KEY | AT91_SHDW_SHDW, at91_shdwc_base + AT91_SHDW_CR);
2573     }
2574    
2575     +static void at91_lpddr_poweroff(void)
2576     +{
2577     + asm volatile(
2578     + /* Align to cache lines */
2579     + ".balign 32\n\t"
2580     +
2581     + /* Ensure AT91_SHDW_CR is in the TLB by reading it */
2582     + " ldr r6, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
2583     +
2584     + /* Power down SDRAM0 */
2585     + " str %1, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
2586     + /* Shutdown CPU */
2587     + " str %3, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
2588     +
2589     + " b .\n\t"
2590     + :
2591     + : "r" (mpddrc_base),
2592     + "r" cpu_to_le32(AT91_DDRSDRC_LPDDR2_PWOFF),
2593     + "r" (at91_shdwc_base),
2594     + "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW)
2595     + : "r0");
2596     +}
2597     +
2598     static int at91_poweroff_get_wakeup_mode(struct device_node *np)
2599     {
2600     const char *pm;
2601     @@ -124,6 +151,8 @@ static void at91_poweroff_dt_set_wakeup_mode(struct platform_device *pdev)
2602     static int __init at91_poweroff_probe(struct platform_device *pdev)
2603     {
2604     struct resource *res;
2605     + struct device_node *np;
2606     + u32 ddr_type;
2607     int ret;
2608    
2609     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2610     @@ -150,12 +179,30 @@ static int __init at91_poweroff_probe(struct platform_device *pdev)
2611    
2612     pm_power_off = at91_poweroff;
2613    
2614     + np = of_find_compatible_node(NULL, NULL, "atmel,sama5d3-ddramc");
2615     + if (!np)
2616     + return 0;
2617     +
2618     + mpddrc_base = of_iomap(np, 0);
2619     + of_node_put(np);
2620     +
2621     + if (!mpddrc_base)
2622     + return 0;
2623     +
2624     + ddr_type = readl(mpddrc_base + AT91_DDRSDRC_MDR) & AT91_DDRSDRC_MD;
2625     + if ((ddr_type == AT91_DDRSDRC_MD_LPDDR2) ||
2626     + (ddr_type == AT91_DDRSDRC_MD_LPDDR3))
2627     + pm_power_off = at91_lpddr_poweroff;
2628     + else
2629     + iounmap(mpddrc_base);
2630     +
2631     return 0;
2632     }
2633    
2634     static int __exit at91_poweroff_remove(struct platform_device *pdev)
2635     {
2636     - if (pm_power_off == at91_poweroff)
2637     + if (pm_power_off == at91_poweroff ||
2638     + pm_power_off == at91_lpddr_poweroff)
2639     pm_power_off = NULL;
2640    
2641     clk_disable_unprepare(sclk);
2642     @@ -163,6 +210,11 @@ static int __exit at91_poweroff_remove(struct platform_device *pdev)
2643     return 0;
2644     }
2645    
2646     +static const struct of_device_id at91_ramc_of_match[] = {
2647     + { .compatible = "atmel,sama5d3-ddramc", },
2648     + { /* sentinel */ }
2649     +};
2650     +
2651     static const struct of_device_id at91_poweroff_of_match[] = {
2652     { .compatible = "atmel,at91sam9260-shdwc", },
2653     { .compatible = "atmel,at91sam9rl-shdwc", },
2654     diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
2655     index 8a5ac9706c9c..90b0b5a70ce5 100644
2656     --- a/drivers/power/reset/at91-sama5d2_shdwc.c
2657     +++ b/drivers/power/reset/at91-sama5d2_shdwc.c
2658     @@ -22,9 +22,12 @@
2659     #include <linux/io.h>
2660     #include <linux/module.h>
2661     #include <linux/of.h>
2662     +#include <linux/of_address.h>
2663     #include <linux/platform_device.h>
2664     #include <linux/printk.h>
2665    
2666     +#include <soc/at91/at91sam9_ddrsdr.h>
2667     +
2668     #define SLOW_CLOCK_FREQ 32768
2669    
2670     #define AT91_SHDW_CR 0x00 /* Shut Down Control Register */
2671     @@ -75,6 +78,7 @@ struct shdwc {
2672     */
2673     static struct shdwc *at91_shdwc;
2674     static struct clk *sclk;
2675     +static void __iomem *mpddrc_base;
2676    
2677     static const unsigned long long sdwc_dbc_period[] = {
2678     0, 3, 32, 512, 4096, 32768,
2679     @@ -108,6 +112,29 @@ static void at91_poweroff(void)
2680     at91_shdwc->at91_shdwc_base + AT91_SHDW_CR);
2681     }
2682    
2683     +static void at91_lpddr_poweroff(void)
2684     +{
2685     + asm volatile(
2686     + /* Align to cache lines */
2687     + ".balign 32\n\t"
2688     +
2689     + /* Ensure AT91_SHDW_CR is in the TLB by reading it */
2690     + " ldr r6, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
2691     +
2692     + /* Power down SDRAM0 */
2693     + " str %1, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
2694     + /* Shutdown CPU */
2695     + " str %3, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
2696     +
2697     + " b .\n\t"
2698     + :
2699     + : "r" (mpddrc_base),
2700     + "r" cpu_to_le32(AT91_DDRSDRC_LPDDR2_PWOFF),
2701     + "r" (at91_shdwc->at91_shdwc_base),
2702     + "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW)
2703     + : "r0");
2704     +}
2705     +
2706     static u32 at91_shdwc_debouncer_value(struct platform_device *pdev,
2707     u32 in_period_us)
2708     {
2709     @@ -212,6 +239,8 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
2710     {
2711     struct resource *res;
2712     const struct of_device_id *match;
2713     + struct device_node *np;
2714     + u32 ddr_type;
2715     int ret;
2716    
2717     if (!pdev->dev.of_node)
2718     @@ -249,6 +278,23 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
2719    
2720     pm_power_off = at91_poweroff;
2721    
2722     + np = of_find_compatible_node(NULL, NULL, "atmel,sama5d3-ddramc");
2723     + if (!np)
2724     + return 0;
2725     +
2726     + mpddrc_base = of_iomap(np, 0);
2727     + of_node_put(np);
2728     +
2729     + if (!mpddrc_base)
2730     + return 0;
2731     +
2732     + ddr_type = readl(mpddrc_base + AT91_DDRSDRC_MDR) & AT91_DDRSDRC_MD;
2733     + if ((ddr_type == AT91_DDRSDRC_MD_LPDDR2) ||
2734     + (ddr_type == AT91_DDRSDRC_MD_LPDDR3))
2735     + pm_power_off = at91_lpddr_poweroff;
2736     + else
2737     + iounmap(mpddrc_base);
2738     +
2739     return 0;
2740     }
2741    
2742     @@ -256,7 +302,8 @@ static int __exit at91_shdwc_remove(struct platform_device *pdev)
2743     {
2744     struct shdwc *shdw = platform_get_drvdata(pdev);
2745    
2746     - if (pm_power_off == at91_poweroff)
2747     + if (pm_power_off == at91_poweroff ||
2748     + pm_power_off == at91_lpddr_poweroff)
2749     pm_power_off = NULL;
2750    
2751     /* Reset values to disable wake-up features */
2752     diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
2753     index 5c1519b229e0..9faccfceb53c 100644
2754     --- a/drivers/regulator/core.c
2755     +++ b/drivers/regulator/core.c
2756     @@ -4357,12 +4357,13 @@ static void regulator_summary_show_subtree(struct seq_file *s,
2757     seq_puts(s, "\n");
2758    
2759     list_for_each_entry(consumer, &rdev->consumer_list, list) {
2760     - if (consumer->dev->class == &regulator_class)
2761     + if (consumer->dev && consumer->dev->class == &regulator_class)
2762     continue;
2763    
2764     seq_printf(s, "%*s%-*s ",
2765     (level + 1) * 3 + 1, "",
2766     - 30 - (level + 1) * 3, dev_name(consumer->dev));
2767     + 30 - (level + 1) * 3,
2768     + consumer->dev ? dev_name(consumer->dev) : "deviceless");
2769    
2770     switch (rdev->desc->type) {
2771     case REGULATOR_VOLTAGE:
2772     diff --git a/drivers/remoteproc/qcom_mdt_loader.c b/drivers/remoteproc/qcom_mdt_loader.c
2773     index 114e8e4cef67..04db02d9059d 100644
2774     --- a/drivers/remoteproc/qcom_mdt_loader.c
2775     +++ b/drivers/remoteproc/qcom_mdt_loader.c
2776     @@ -115,6 +115,7 @@ int qcom_mdt_load(struct rproc *rproc,
2777     const struct elf32_phdr *phdrs;
2778     const struct elf32_phdr *phdr;
2779     const struct elf32_hdr *ehdr;
2780     + const struct firmware *seg_fw;
2781     size_t fw_name_len;
2782     char *fw_name;
2783     void *ptr;
2784     @@ -153,16 +154,16 @@ int qcom_mdt_load(struct rproc *rproc,
2785    
2786     if (phdr->p_filesz) {
2787     sprintf(fw_name + fw_name_len - 3, "b%02d", i);
2788     - ret = request_firmware(&fw, fw_name, &rproc->dev);
2789     + ret = request_firmware(&seg_fw, fw_name, &rproc->dev);
2790     if (ret) {
2791     dev_err(&rproc->dev, "failed to load %s\n",
2792     fw_name);
2793     break;
2794     }
2795    
2796     - memcpy(ptr, fw->data, fw->size);
2797     + memcpy(ptr, seg_fw->data, seg_fw->size);
2798    
2799     - release_firmware(fw);
2800     + release_firmware(seg_fw);
2801     }
2802    
2803     if (phdr->p_memsz > phdr->p_filesz)
2804     diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
2805     index e859d148aba9..0723c97ebea3 100644
2806     --- a/drivers/rtc/Kconfig
2807     +++ b/drivers/rtc/Kconfig
2808     @@ -1432,7 +1432,7 @@ config RTC_DRV_SUN4V
2809     based RTC on SUN4V systems.
2810    
2811     config RTC_DRV_SUN6I
2812     - tristate "Allwinner A31 RTC"
2813     + bool "Allwinner A31 RTC"
2814     default MACH_SUN6I || MACH_SUN8I || COMPILE_TEST
2815     depends on ARCH_SUNXI
2816     help
2817     diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
2818     index c169a2cd4727..b0d45d23a11b 100644
2819     --- a/drivers/rtc/rtc-sun6i.c
2820     +++ b/drivers/rtc/rtc-sun6i.c
2821     @@ -37,9 +37,11 @@
2822    
2823     /* Control register */
2824     #define SUN6I_LOSC_CTRL 0x0000
2825     +#define SUN6I_LOSC_CTRL_KEY (0x16aa << 16)
2826     #define SUN6I_LOSC_CTRL_ALM_DHMS_ACC BIT(9)
2827     #define SUN6I_LOSC_CTRL_RTC_HMS_ACC BIT(8)
2828     #define SUN6I_LOSC_CTRL_RTC_YMD_ACC BIT(7)
2829     +#define SUN6I_LOSC_CTRL_EXT_OSC BIT(0)
2830     #define SUN6I_LOSC_CTRL_ACC_MASK GENMASK(9, 7)
2831    
2832     /* RTC */
2833     @@ -114,13 +116,17 @@ struct sun6i_rtc_dev {
2834     void __iomem *base;
2835     int irq;
2836     unsigned long alarm;
2837     +
2838     + spinlock_t lock;
2839     };
2840    
2841     static irqreturn_t sun6i_rtc_alarmirq(int irq, void *id)
2842     {
2843     struct sun6i_rtc_dev *chip = (struct sun6i_rtc_dev *) id;
2844     + irqreturn_t ret = IRQ_NONE;
2845     u32 val;
2846    
2847     + spin_lock(&chip->lock);
2848     val = readl(chip->base + SUN6I_ALRM_IRQ_STA);
2849    
2850     if (val & SUN6I_ALRM_IRQ_STA_CNT_IRQ_PEND) {
2851     @@ -129,10 +135,11 @@ static irqreturn_t sun6i_rtc_alarmirq(int irq, void *id)
2852    
2853     rtc_update_irq(chip->rtc, 1, RTC_AF | RTC_IRQF);
2854    
2855     - return IRQ_HANDLED;
2856     + ret = IRQ_HANDLED;
2857     }
2858     + spin_unlock(&chip->lock);
2859    
2860     - return IRQ_NONE;
2861     + return ret;
2862     }
2863    
2864     static void sun6i_rtc_setaie(int to, struct sun6i_rtc_dev *chip)
2865     @@ -140,6 +147,7 @@ static void sun6i_rtc_setaie(int to, struct sun6i_rtc_dev *chip)
2866     u32 alrm_val = 0;
2867     u32 alrm_irq_val = 0;
2868     u32 alrm_wake_val = 0;
2869     + unsigned long flags;
2870    
2871     if (to) {
2872     alrm_val = SUN6I_ALRM_EN_CNT_EN;
2873     @@ -150,9 +158,11 @@ static void sun6i_rtc_setaie(int to, struct sun6i_rtc_dev *chip)
2874     chip->base + SUN6I_ALRM_IRQ_STA);
2875     }
2876    
2877     + spin_lock_irqsave(&chip->lock, flags);
2878     writel(alrm_val, chip->base + SUN6I_ALRM_EN);
2879     writel(alrm_irq_val, chip->base + SUN6I_ALRM_IRQ_EN);
2880     writel(alrm_wake_val, chip->base + SUN6I_ALARM_CONFIG);
2881     + spin_unlock_irqrestore(&chip->lock, flags);
2882     }
2883    
2884     static int sun6i_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
2885     @@ -191,11 +201,15 @@ static int sun6i_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
2886     static int sun6i_rtc_getalarm(struct device *dev, struct rtc_wkalrm *wkalrm)
2887     {
2888     struct sun6i_rtc_dev *chip = dev_get_drvdata(dev);
2889     + unsigned long flags;
2890     u32 alrm_st;
2891     u32 alrm_en;
2892    
2893     + spin_lock_irqsave(&chip->lock, flags);
2894     alrm_en = readl(chip->base + SUN6I_ALRM_IRQ_EN);
2895     alrm_st = readl(chip->base + SUN6I_ALRM_IRQ_STA);
2896     + spin_unlock_irqrestore(&chip->lock, flags);
2897     +
2898     wkalrm->enabled = !!(alrm_en & SUN6I_ALRM_EN_CNT_EN);
2899     wkalrm->pending = !!(alrm_st & SUN6I_ALRM_EN_CNT_EN);
2900     rtc_time_to_tm(chip->alarm, &wkalrm->time);
2901     @@ -356,6 +370,7 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
2902     chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
2903     if (!chip)
2904     return -ENOMEM;
2905     + spin_lock_init(&chip->lock);
2906    
2907     platform_set_drvdata(pdev, chip);
2908     chip->dev = &pdev->dev;
2909     @@ -404,6 +419,10 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
2910     /* disable alarm wakeup */
2911     writel(0, chip->base + SUN6I_ALARM_CONFIG);
2912    
2913     + /* switch to the external, more precise, oscillator */
2914     + writel(SUN6I_LOSC_CTRL_KEY | SUN6I_LOSC_CTRL_EXT_OSC,
2915     + chip->base + SUN6I_LOSC_CTRL);
2916     +
2917     chip->rtc = rtc_device_register("rtc-sun6i", &pdev->dev,
2918     &sun6i_rtc_ops, THIS_MODULE);
2919     if (IS_ERR(chip->rtc)) {
2920     @@ -439,9 +458,4 @@ static struct platform_driver sun6i_rtc_driver = {
2921     .of_match_table = sun6i_rtc_dt_ids,
2922     },
2923     };
2924     -
2925     -module_platform_driver(sun6i_rtc_driver);
2926     -
2927     -MODULE_DESCRIPTION("sun6i RTC driver");
2928     -MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
2929     -MODULE_LICENSE("GPL");
2930     +builtin_platform_driver(sun6i_rtc_driver);
2931     diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
2932     index 0c453880f214..7b178d765726 100644
2933     --- a/drivers/scsi/aacraid/src.c
2934     +++ b/drivers/scsi/aacraid/src.c
2935     @@ -414,16 +414,23 @@ static int aac_src_check_health(struct aac_dev *dev)
2936     u32 status = src_readl(dev, MUnit.OMR);
2937    
2938     /*
2939     + * Check to see if the board panic'd.
2940     + */
2941     + if (unlikely(status & KERNEL_PANIC))
2942     + goto err_blink;
2943     +
2944     + /*
2945     * Check to see if the board failed any self tests.
2946     */
2947     if (unlikely(status & SELF_TEST_FAILED))
2948     - return -1;
2949     + goto err_out;
2950    
2951     /*
2952     - * Check to see if the board panic'd.
2953     + * Check to see if the board failed any self tests.
2954     */
2955     - if (unlikely(status & KERNEL_PANIC))
2956     - return (status >> 16) & 0xFF;
2957     + if (unlikely(status & MONITOR_PANIC))
2958     + goto err_out;
2959     +
2960     /*
2961     * Wait for the adapter to be up and running.
2962     */
2963     @@ -433,6 +440,12 @@ static int aac_src_check_health(struct aac_dev *dev)
2964     * Everything is OK
2965     */
2966     return 0;
2967     +
2968     +err_out:
2969     + return -1;
2970     +
2971     +err_blink:
2972     + return (status > 16) & 0xFF;
2973     }
2974    
2975     /**
2976     diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
2977     index ee8022737591..55faa94637a9 100644
2978     --- a/drivers/scsi/lpfc/lpfc_hw4.h
2979     +++ b/drivers/scsi/lpfc/lpfc_hw4.h
2980     @@ -1185,6 +1185,7 @@ struct lpfc_mbx_wq_create {
2981     #define lpfc_mbx_wq_create_page_size_SHIFT 0
2982     #define lpfc_mbx_wq_create_page_size_MASK 0x000000FF
2983     #define lpfc_mbx_wq_create_page_size_WORD word1
2984     +#define LPFC_WQ_PAGE_SIZE_4096 0x1
2985     #define lpfc_mbx_wq_create_wqe_size_SHIFT 8
2986     #define lpfc_mbx_wq_create_wqe_size_MASK 0x0000000F
2987     #define lpfc_mbx_wq_create_wqe_size_WORD word1
2988     @@ -1256,6 +1257,7 @@ struct rq_context {
2989     #define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */
2990     #define lpfc_rq_context_page_size_MASK 0x000000FF
2991     #define lpfc_rq_context_page_size_WORD word0
2992     +#define LPFC_RQ_PAGE_SIZE_4096 0x1
2993     uint32_t reserved1;
2994     uint32_t word2;
2995     #define lpfc_rq_context_cq_id_SHIFT 16
2996     diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
2997     index f4f77c5b0c83..49b4c798de18 100644
2998     --- a/drivers/scsi/lpfc/lpfc_sli.c
2999     +++ b/drivers/scsi/lpfc/lpfc_sli.c
3000     @@ -13678,7 +13678,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
3001     LPFC_WQ_WQE_SIZE_128);
3002     bf_set(lpfc_mbx_wq_create_page_size,
3003     &wq_create->u.request_1,
3004     - (PAGE_SIZE/SLI4_PAGE_SIZE));
3005     + LPFC_WQ_PAGE_SIZE_4096);
3006     page = wq_create->u.request_1.page;
3007     break;
3008     }
3009     @@ -13704,8 +13704,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
3010     LPFC_WQ_WQE_SIZE_128);
3011     break;
3012     }
3013     - bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
3014     - (PAGE_SIZE/SLI4_PAGE_SIZE));
3015     + bf_set(lpfc_mbx_wq_create_page_size,
3016     + &wq_create->u.request_1,
3017     + LPFC_WQ_PAGE_SIZE_4096);
3018     page = wq_create->u.request_1.page;
3019     break;
3020     default:
3021     @@ -13891,7 +13892,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
3022     LPFC_RQE_SIZE_8);
3023     bf_set(lpfc_rq_context_page_size,
3024     &rq_create->u.request.context,
3025     - (PAGE_SIZE/SLI4_PAGE_SIZE));
3026     + LPFC_RQ_PAGE_SIZE_4096);
3027     } else {
3028     switch (hrq->entry_count) {
3029     default:
3030     diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
3031     index b8d3b97b217a..84addee05be6 100644
3032     --- a/drivers/scsi/scsi_dh.c
3033     +++ b/drivers/scsi/scsi_dh.c
3034     @@ -219,20 +219,6 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
3035     }
3036     EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
3037    
3038     -static struct scsi_device *get_sdev_from_queue(struct request_queue *q)
3039     -{
3040     - struct scsi_device *sdev;
3041     - unsigned long flags;
3042     -
3043     - spin_lock_irqsave(q->queue_lock, flags);
3044     - sdev = q->queuedata;
3045     - if (!sdev || !get_device(&sdev->sdev_gendev))
3046     - sdev = NULL;
3047     - spin_unlock_irqrestore(q->queue_lock, flags);
3048     -
3049     - return sdev;
3050     -}
3051     -
3052     /*
3053     * scsi_dh_activate - activate the path associated with the scsi_device
3054     * corresponding to the given request queue.
3055     @@ -251,7 +237,7 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
3056     struct scsi_device *sdev;
3057     int err = SCSI_DH_NOSYS;
3058    
3059     - sdev = get_sdev_from_queue(q);
3060     + sdev = scsi_device_from_queue(q);
3061     if (!sdev) {
3062     if (fn)
3063     fn(data, err);
3064     @@ -298,7 +284,7 @@ int scsi_dh_set_params(struct request_queue *q, const char *params)
3065     struct scsi_device *sdev;
3066     int err = -SCSI_DH_NOSYS;
3067    
3068     - sdev = get_sdev_from_queue(q);
3069     + sdev = scsi_device_from_queue(q);
3070     if (!sdev)
3071     return err;
3072    
3073     @@ -321,7 +307,7 @@ int scsi_dh_attach(struct request_queue *q, const char *name)
3074     struct scsi_device_handler *scsi_dh;
3075     int err = 0;
3076    
3077     - sdev = get_sdev_from_queue(q);
3078     + sdev = scsi_device_from_queue(q);
3079     if (!sdev)
3080     return -ENODEV;
3081    
3082     @@ -359,7 +345,7 @@ const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
3083     struct scsi_device *sdev;
3084     const char *handler_name = NULL;
3085    
3086     - sdev = get_sdev_from_queue(q);
3087     + sdev = scsi_device_from_queue(q);
3088     if (!sdev)
3089     return NULL;
3090    
3091     diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
3092     index e64eae4392a4..d8099c7cab00 100644
3093     --- a/drivers/scsi/scsi_lib.c
3094     +++ b/drivers/scsi/scsi_lib.c
3095     @@ -2127,6 +2127,29 @@ void scsi_mq_destroy_tags(struct Scsi_Host *shost)
3096     blk_mq_free_tag_set(&shost->tag_set);
3097     }
3098    
3099     +/**
3100     + * scsi_device_from_queue - return sdev associated with a request_queue
3101     + * @q: The request queue to return the sdev from
3102     + *
3103     + * Return the sdev associated with a request queue or NULL if the
3104     + * request_queue does not reference a SCSI device.
3105     + */
3106     +struct scsi_device *scsi_device_from_queue(struct request_queue *q)
3107     +{
3108     + struct scsi_device *sdev = NULL;
3109     +
3110     + if (q->mq_ops) {
3111     + if (q->mq_ops == &scsi_mq_ops)
3112     + sdev = q->queuedata;
3113     + } else if (q->request_fn == scsi_request_fn)
3114     + sdev = q->queuedata;
3115     + if (!sdev || !get_device(&sdev->sdev_gendev))
3116     + sdev = NULL;
3117     +
3118     + return sdev;
3119     +}
3120     +EXPORT_SYMBOL_GPL(scsi_device_from_queue);
3121     +
3122     /*
3123     * Function: scsi_block_requests()
3124     *
3125     diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
3126     index 8ccfc9ea874b..3f218f5cf29b 100644
3127     --- a/drivers/scsi/storvsc_drv.c
3128     +++ b/drivers/scsi/storvsc_drv.c
3129     @@ -136,6 +136,8 @@ struct hv_fc_wwn_packet {
3130     #define SRB_FLAGS_PORT_DRIVER_RESERVED 0x0F000000
3131     #define SRB_FLAGS_CLASS_DRIVER_RESERVED 0xF0000000
3132    
3133     +#define SP_UNTAGGED ((unsigned char) ~0)
3134     +#define SRB_SIMPLE_TAG_REQUEST 0x20
3135    
3136     /*
3137     * Platform neutral description of a scsi request -
3138     @@ -375,6 +377,7 @@ enum storvsc_request_type {
3139     #define SRB_STATUS_SUCCESS 0x01
3140     #define SRB_STATUS_ABORTED 0x02
3141     #define SRB_STATUS_ERROR 0x04
3142     +#define SRB_STATUS_DATA_OVERRUN 0x12
3143    
3144     #define SRB_STATUS(status) \
3145     (status & ~(SRB_STATUS_AUTOSENSE_VALID | SRB_STATUS_QUEUE_FROZEN))
3146     @@ -889,6 +892,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
3147     switch (SRB_STATUS(vm_srb->srb_status)) {
3148     case SRB_STATUS_ERROR:
3149     /*
3150     + * Let upper layer deal with error when
3151     + * sense message is present.
3152     + */
3153     +
3154     + if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)
3155     + break;
3156     + /*
3157     * If there is an error; offline the device since all
3158     * error recovery strategies would have already been
3159     * deployed on the host side. However, if the command
3160     @@ -953,6 +963,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
3161     struct scsi_cmnd *scmnd = cmd_request->cmd;
3162     struct scsi_sense_hdr sense_hdr;
3163     struct vmscsi_request *vm_srb;
3164     + u32 data_transfer_length;
3165     struct Scsi_Host *host;
3166     u32 payload_sz = cmd_request->payload_sz;
3167     void *payload = cmd_request->payload;
3168     @@ -960,6 +971,7 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
3169     host = stor_dev->host;
3170    
3171     vm_srb = &cmd_request->vstor_packet.vm_srb;
3172     + data_transfer_length = vm_srb->data_transfer_length;
3173    
3174     scmnd->result = vm_srb->scsi_status;
3175    
3176     @@ -973,13 +985,20 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request,
3177     &sense_hdr);
3178     }
3179    
3180     - if (vm_srb->srb_status != SRB_STATUS_SUCCESS)
3181     + if (vm_srb->srb_status != SRB_STATUS_SUCCESS) {
3182     storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
3183     sense_hdr.ascq);
3184     + /*
3185     + * The Windows driver set data_transfer_length on
3186     + * SRB_STATUS_DATA_OVERRUN. On other errors, this value
3187     + * is untouched. In these cases we set it to 0.
3188     + */
3189     + if (vm_srb->srb_status != SRB_STATUS_DATA_OVERRUN)
3190     + data_transfer_length = 0;
3191     + }
3192    
3193     scsi_set_resid(scmnd,
3194     - cmd_request->payload->range.len -
3195     - vm_srb->data_transfer_length);
3196     + cmd_request->payload->range.len - data_transfer_length);
3197    
3198     scmnd->scsi_done(scmnd);
3199    
3200     @@ -1451,6 +1470,13 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
3201     vm_srb->win8_extension.srb_flags |=
3202     SRB_FLAGS_DISABLE_SYNCH_TRANSFER;
3203    
3204     + if (scmnd->device->tagged_supported) {
3205     + vm_srb->win8_extension.srb_flags |=
3206     + (SRB_FLAGS_QUEUE_ACTION_ENABLE | SRB_FLAGS_NO_QUEUE_FREEZE);
3207     + vm_srb->win8_extension.queue_tag = SP_UNTAGGED;
3208     + vm_srb->win8_extension.queue_action = SRB_SIMPLE_TAG_REQUEST;
3209     + }
3210     +
3211     /* Build the SRB */
3212     switch (scmnd->sc_data_direction) {
3213     case DMA_TO_DEVICE:
3214     diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
3215     index 3c09e94cf827..186342b74141 100644
3216     --- a/drivers/spi/spi-s3c64xx.c
3217     +++ b/drivers/spi/spi-s3c64xx.c
3218     @@ -1003,7 +1003,7 @@ static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
3219     sci->num_cs = temp;
3220     }
3221    
3222     - sci->no_cs = of_property_read_bool(dev->of_node, "broken-cs");
3223     + sci->no_cs = of_property_read_bool(dev->of_node, "no-cs-readback");
3224    
3225     return sci;
3226     }
3227     diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
3228     index 7882306adeca..29dc249b0c74 100644
3229     --- a/drivers/staging/greybus/loopback.c
3230     +++ b/drivers/staging/greybus/loopback.c
3231     @@ -1051,8 +1051,13 @@ static int gb_loopback_fn(void *data)
3232     gb_loopback_calculate_stats(gb, !!error);
3233     }
3234     gb->send_count++;
3235     - if (us_wait)
3236     - udelay(us_wait);
3237     +
3238     + if (us_wait) {
3239     + if (us_wait < 20000)
3240     + usleep_range(us_wait, us_wait + 100);
3241     + else
3242     + msleep(us_wait / 1000);
3243     + }
3244     }
3245    
3246     gb_pm_runtime_put_autosuspend(bundle);
3247     diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
3248     index f5619d8744ef..0256d65dfcd8 100644
3249     --- a/drivers/staging/lustre/lnet/selftest/rpc.c
3250     +++ b/drivers/staging/lustre/lnet/selftest/rpc.c
3251     @@ -252,7 +252,7 @@ srpc_service_init(struct srpc_service *svc)
3252     svc->sv_shuttingdown = 0;
3253    
3254     svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(),
3255     - sizeof(*svc->sv_cpt_data));
3256     + sizeof(**svc->sv_cpt_data));
3257     if (!svc->sv_cpt_data)
3258     return -ENOMEM;
3259    
3260     diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
3261     index b87cbbbee054..b39fd1e9b4a0 100644
3262     --- a/drivers/staging/rtl8188eu/core/rtw_recv.c
3263     +++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
3264     @@ -1383,6 +1383,9 @@ static int wlanhdr_to_ethhdr(struct recv_frame *precvframe)
3265     ptr = recvframe_pull(precvframe, (rmv_len-sizeof(struct ethhdr) + (bsnaphdr ? 2 : 0)));
3266     }
3267    
3268     + if (!ptr)
3269     + return _FAIL;
3270     +
3271     memcpy(ptr, pattrib->dst, ETH_ALEN);
3272     memcpy(ptr+ETH_ALEN, pattrib->src, ETH_ALEN);
3273    
3274     diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
3275     index cbd2e51ba42b..cedf25b0b093 100644
3276     --- a/drivers/staging/rtl8712/rtl871x_recv.c
3277     +++ b/drivers/staging/rtl8712/rtl871x_recv.c
3278     @@ -643,11 +643,16 @@ sint r8712_wlanhdr_to_ethhdr(union recv_frame *precvframe)
3279     /* append rx status for mp test packets */
3280     ptr = recvframe_pull(precvframe, (rmv_len -
3281     sizeof(struct ethhdr) + 2) - 24);
3282     + if (!ptr)
3283     + return _FAIL;
3284     memcpy(ptr, get_rxmem(precvframe), 24);
3285     ptr += 24;
3286     - } else
3287     + } else {
3288     ptr = recvframe_pull(precvframe, (rmv_len -
3289     sizeof(struct ethhdr) + (bsnaphdr ? 2 : 0)));
3290     + if (!ptr)
3291     + return _FAIL;
3292     + }
3293    
3294     memcpy(ptr, pattrib->dst, ETH_ALEN);
3295     memcpy(ptr + ETH_ALEN, pattrib->src, ETH_ALEN);
3296     diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
3297     index d2b860ebfe13..5dc6bfc91f4b 100644
3298     --- a/drivers/usb/dwc3/gadget.c
3299     +++ b/drivers/usb/dwc3/gadget.c
3300     @@ -1234,6 +1234,9 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
3301     unsigned transfer_in_flight;
3302     unsigned started;
3303    
3304     + if (dep->flags & DWC3_EP_STALL)
3305     + return 0;
3306     +
3307     if (dep->number > 1)
3308     trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
3309     else
3310     @@ -1258,6 +1261,8 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
3311     else
3312     dep->flags |= DWC3_EP_STALL;
3313     } else {
3314     + if (!(dep->flags & DWC3_EP_STALL))
3315     + return 0;
3316    
3317     ret = dwc3_send_clear_stall_ep_cmd(dep);
3318     if (ret)
3319     diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
3320     index e2966f87c860..b0f71957d00b 100644
3321     --- a/drivers/usb/gadget/function/f_hid.c
3322     +++ b/drivers/usb/gadget/function/f_hid.c
3323     @@ -50,12 +50,12 @@ struct f_hidg {
3324    
3325     /* recv report */
3326     struct list_head completed_out_req;
3327     - spinlock_t spinlock;
3328     + spinlock_t read_spinlock;
3329     wait_queue_head_t read_queue;
3330     unsigned int qlen;
3331    
3332     /* send report */
3333     - struct mutex lock;
3334     + spinlock_t write_spinlock;
3335     bool write_pending;
3336     wait_queue_head_t write_queue;
3337     struct usb_request *req;
3338     @@ -204,28 +204,35 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
3339     if (!access_ok(VERIFY_WRITE, buffer, count))
3340     return -EFAULT;
3341    
3342     - spin_lock_irqsave(&hidg->spinlock, flags);
3343     + spin_lock_irqsave(&hidg->read_spinlock, flags);
3344    
3345     #define READ_COND (!list_empty(&hidg->completed_out_req))
3346    
3347     /* wait for at least one buffer to complete */
3348     while (!READ_COND) {
3349     - spin_unlock_irqrestore(&hidg->spinlock, flags);
3350     + spin_unlock_irqrestore(&hidg->read_spinlock, flags);
3351     if (file->f_flags & O_NONBLOCK)
3352     return -EAGAIN;
3353    
3354     if (wait_event_interruptible(hidg->read_queue, READ_COND))
3355     return -ERESTARTSYS;
3356    
3357     - spin_lock_irqsave(&hidg->spinlock, flags);
3358     + spin_lock_irqsave(&hidg->read_spinlock, flags);
3359     }
3360    
3361     /* pick the first one */
3362     list = list_first_entry(&hidg->completed_out_req,
3363     struct f_hidg_req_list, list);
3364     +
3365     + /*
3366     + * Remove this from list to protect it from beign free()
3367     + * while host disables our function
3368     + */
3369     + list_del(&list->list);
3370     +
3371     req = list->req;
3372     count = min_t(unsigned int, count, req->actual - list->pos);
3373     - spin_unlock_irqrestore(&hidg->spinlock, flags);
3374     + spin_unlock_irqrestore(&hidg->read_spinlock, flags);
3375    
3376     /* copy to user outside spinlock */
3377     count -= copy_to_user(buffer, req->buf + list->pos, count);
3378     @@ -238,15 +245,20 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
3379     * call, taking into account its current read position.
3380     */
3381     if (list->pos == req->actual) {
3382     - spin_lock_irqsave(&hidg->spinlock, flags);
3383     - list_del(&list->list);
3384     kfree(list);
3385     - spin_unlock_irqrestore(&hidg->spinlock, flags);
3386    
3387     req->length = hidg->report_length;
3388     ret = usb_ep_queue(hidg->out_ep, req, GFP_KERNEL);
3389     - if (ret < 0)
3390     + if (ret < 0) {
3391     + free_ep_req(hidg->out_ep, req);
3392     return ret;
3393     + }
3394     + } else {
3395     + spin_lock_irqsave(&hidg->read_spinlock, flags);
3396     + list_add(&list->list, &hidg->completed_out_req);
3397     + spin_unlock_irqrestore(&hidg->read_spinlock, flags);
3398     +
3399     + wake_up(&hidg->read_queue);
3400     }
3401    
3402     return count;
3403     @@ -255,13 +267,16 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
3404     static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req)
3405     {
3406     struct f_hidg *hidg = (struct f_hidg *)ep->driver_data;
3407     + unsigned long flags;
3408    
3409     if (req->status != 0) {
3410     ERROR(hidg->func.config->cdev,
3411     "End Point Request ERROR: %d\n", req->status);
3412     }
3413    
3414     + spin_lock_irqsave(&hidg->write_spinlock, flags);
3415     hidg->write_pending = 0;
3416     + spin_unlock_irqrestore(&hidg->write_spinlock, flags);
3417     wake_up(&hidg->write_queue);
3418     }
3419    
3420     @@ -269,18 +284,19 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
3421     size_t count, loff_t *offp)
3422     {
3423     struct f_hidg *hidg = file->private_data;
3424     + unsigned long flags;
3425     ssize_t status = -ENOMEM;
3426    
3427     if (!access_ok(VERIFY_READ, buffer, count))
3428     return -EFAULT;
3429    
3430     - mutex_lock(&hidg->lock);
3431     + spin_lock_irqsave(&hidg->write_spinlock, flags);
3432    
3433     #define WRITE_COND (!hidg->write_pending)
3434    
3435     /* write queue */
3436     while (!WRITE_COND) {
3437     - mutex_unlock(&hidg->lock);
3438     + spin_unlock_irqrestore(&hidg->write_spinlock, flags);
3439     if (file->f_flags & O_NONBLOCK)
3440     return -EAGAIN;
3441    
3442     @@ -288,17 +304,20 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
3443     hidg->write_queue, WRITE_COND))
3444     return -ERESTARTSYS;
3445    
3446     - mutex_lock(&hidg->lock);
3447     + spin_lock_irqsave(&hidg->write_spinlock, flags);
3448     }
3449    
3450     + hidg->write_pending = 1;
3451     count = min_t(unsigned, count, hidg->report_length);
3452     +
3453     + spin_unlock_irqrestore(&hidg->write_spinlock, flags);
3454     status = copy_from_user(hidg->req->buf, buffer, count);
3455    
3456     if (status != 0) {
3457     ERROR(hidg->func.config->cdev,
3458     "copy_from_user error\n");
3459     - mutex_unlock(&hidg->lock);
3460     - return -EINVAL;
3461     + status = -EINVAL;
3462     + goto release_write_pending;
3463     }
3464    
3465     hidg->req->status = 0;
3466     @@ -306,19 +325,23 @@ static ssize_t f_hidg_write(struct file *file, const char __user *buffer,
3467     hidg->req->length = count;
3468     hidg->req->complete = f_hidg_req_complete;
3469     hidg->req->context = hidg;
3470     - hidg->write_pending = 1;
3471    
3472     status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC);
3473     if (status < 0) {
3474     ERROR(hidg->func.config->cdev,
3475     "usb_ep_queue error on int endpoint %zd\n", status);
3476     - hidg->write_pending = 0;
3477     - wake_up(&hidg->write_queue);
3478     + goto release_write_pending;
3479     } else {
3480     status = count;
3481     }
3482    
3483     - mutex_unlock(&hidg->lock);
3484     + return status;
3485     +release_write_pending:
3486     + spin_lock_irqsave(&hidg->write_spinlock, flags);
3487     + hidg->write_pending = 0;
3488     + spin_unlock_irqrestore(&hidg->write_spinlock, flags);
3489     +
3490     + wake_up(&hidg->write_queue);
3491    
3492     return status;
3493     }
3494     @@ -371,20 +394,36 @@ static inline struct usb_request *hidg_alloc_ep_req(struct usb_ep *ep,
3495     static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req)
3496     {
3497     struct f_hidg *hidg = (struct f_hidg *) req->context;
3498     + struct usb_composite_dev *cdev = hidg->func.config->cdev;
3499     struct f_hidg_req_list *req_list;
3500     unsigned long flags;
3501    
3502     - req_list = kzalloc(sizeof(*req_list), GFP_ATOMIC);
3503     - if (!req_list)
3504     - return;
3505     + switch (req->status) {
3506     + case 0:
3507     + req_list = kzalloc(sizeof(*req_list), GFP_ATOMIC);
3508     + if (!req_list) {
3509     + ERROR(cdev, "Unable to allocate mem for req_list\n");
3510     + goto free_req;
3511     + }
3512    
3513     - req_list->req = req;
3514     + req_list->req = req;
3515    
3516     - spin_lock_irqsave(&hidg->spinlock, flags);
3517     - list_add_tail(&req_list->list, &hidg->completed_out_req);
3518     - spin_unlock_irqrestore(&hidg->spinlock, flags);
3519     + spin_lock_irqsave(&hidg->read_spinlock, flags);
3520     + list_add_tail(&req_list->list, &hidg->completed_out_req);
3521     + spin_unlock_irqrestore(&hidg->read_spinlock, flags);
3522    
3523     - wake_up(&hidg->read_queue);
3524     + wake_up(&hidg->read_queue);
3525     + break;
3526     + default:
3527     + ERROR(cdev, "Set report failed %d\n", req->status);
3528     + /* FALLTHROUGH */
3529     + case -ECONNABORTED: /* hardware forced ep reset */
3530     + case -ECONNRESET: /* request dequeued */
3531     + case -ESHUTDOWN: /* disconnect from host */
3532     +free_req:
3533     + free_ep_req(ep, req);
3534     + return;
3535     + }
3536     }
3537    
3538     static int hidg_setup(struct usb_function *f,
3539     @@ -490,14 +529,18 @@ static void hidg_disable(struct usb_function *f)
3540     {
3541     struct f_hidg *hidg = func_to_hidg(f);
3542     struct f_hidg_req_list *list, *next;
3543     + unsigned long flags;
3544    
3545     usb_ep_disable(hidg->in_ep);
3546     usb_ep_disable(hidg->out_ep);
3547    
3548     + spin_lock_irqsave(&hidg->read_spinlock, flags);
3549     list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) {
3550     + free_ep_req(hidg->out_ep, list->req);
3551     list_del(&list->list);
3552     kfree(list);
3553     }
3554     + spin_unlock_irqrestore(&hidg->read_spinlock, flags);
3555     }
3556    
3557     static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
3558     @@ -646,8 +689,8 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
3559     if (status)
3560     goto fail;
3561    
3562     - mutex_init(&hidg->lock);
3563     - spin_lock_init(&hidg->spinlock);
3564     + spin_lock_init(&hidg->write_spinlock);
3565     + spin_lock_init(&hidg->read_spinlock);
3566     init_waitqueue_head(&hidg->write_queue);
3567     init_waitqueue_head(&hidg->read_queue);
3568     INIT_LIST_HEAD(&hidg->completed_out_req);
3569     diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
3570     index 0402177f93cd..d685d82dcf48 100644
3571     --- a/drivers/usb/gadget/udc/core.c
3572     +++ b/drivers/usb/gadget/udc/core.c
3573     @@ -1080,6 +1080,24 @@ static void usb_udc_nop_release(struct device *dev)
3574     dev_vdbg(dev, "%s\n", __func__);
3575     }
3576    
3577     +/* should be called with udc_lock held */
3578     +static int check_pending_gadget_drivers(struct usb_udc *udc)
3579     +{
3580     + struct usb_gadget_driver *driver;
3581     + int ret = 0;
3582     +
3583     + list_for_each_entry(driver, &gadget_driver_pending_list, pending)
3584     + if (!driver->udc_name || strcmp(driver->udc_name,
3585     + dev_name(&udc->dev)) == 0) {
3586     + ret = udc_bind_to_driver(udc, driver);
3587     + if (ret != -EPROBE_DEFER)
3588     + list_del(&driver->pending);
3589     + break;
3590     + }
3591     +
3592     + return ret;
3593     +}
3594     +
3595     /**
3596     * usb_add_gadget_udc_release - adds a new gadget to the udc class driver list
3597     * @parent: the parent device to this udc. Usually the controller driver's
3598     @@ -1093,7 +1111,6 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
3599     void (*release)(struct device *dev))
3600     {
3601     struct usb_udc *udc;
3602     - struct usb_gadget_driver *driver;
3603     int ret = -ENOMEM;
3604    
3605     udc = kzalloc(sizeof(*udc), GFP_KERNEL);
3606     @@ -1136,17 +1153,9 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
3607     udc->vbus = true;
3608    
3609     /* pick up one of pending gadget drivers */
3610     - list_for_each_entry(driver, &gadget_driver_pending_list, pending) {
3611     - if (!driver->udc_name || strcmp(driver->udc_name,
3612     - dev_name(&udc->dev)) == 0) {
3613     - ret = udc_bind_to_driver(udc, driver);
3614     - if (ret != -EPROBE_DEFER)
3615     - list_del(&driver->pending);
3616     - if (ret)
3617     - goto err5;
3618     - break;
3619     - }
3620     - }
3621     + ret = check_pending_gadget_drivers(udc);
3622     + if (ret)
3623     + goto err5;
3624    
3625     mutex_unlock(&udc_lock);
3626    
3627     @@ -1356,14 +1365,22 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
3628     return -EINVAL;
3629    
3630     mutex_lock(&udc_lock);
3631     - list_for_each_entry(udc, &udc_list, list)
3632     + list_for_each_entry(udc, &udc_list, list) {
3633     if (udc->driver == driver) {
3634     usb_gadget_remove_driver(udc);
3635     usb_gadget_set_state(udc->gadget,
3636     - USB_STATE_NOTATTACHED);
3637     + USB_STATE_NOTATTACHED);
3638     +
3639     + /* Maybe there is someone waiting for this UDC? */
3640     + check_pending_gadget_drivers(udc);
3641     + /*
3642     + * For now we ignore bind errors as probably it's
3643     + * not a valid reason to fail other's gadget unbind
3644     + */
3645     ret = 0;
3646     break;
3647     }
3648     + }
3649    
3650     if (ret) {
3651     list_del(&driver->pending);
3652     diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
3653     index aab5221d6c2e..aac0ce8aeb0b 100644
3654     --- a/drivers/usb/gadget/udc/fsl_udc_core.c
3655     +++ b/drivers/usb/gadget/udc/fsl_udc_core.c
3656     @@ -1249,6 +1249,12 @@ static const struct usb_gadget_ops fsl_gadget_ops = {
3657     .udc_stop = fsl_udc_stop,
3658     };
3659    
3660     +/*
3661     + * Empty complete function used by this driver to fill in the req->complete
3662     + * field when creating a request since the complete field is mandatory.
3663     + */
3664     +static void fsl_noop_complete(struct usb_ep *ep, struct usb_request *req) { }
3665     +
3666     /* Set protocol stall on ep0, protocol stall will automatically be cleared
3667     on new transaction */
3668     static void ep0stall(struct fsl_udc *udc)
3669     @@ -1283,7 +1289,7 @@ static int ep0_prime_status(struct fsl_udc *udc, int direction)
3670     req->req.length = 0;
3671     req->req.status = -EINPROGRESS;
3672     req->req.actual = 0;
3673     - req->req.complete = NULL;
3674     + req->req.complete = fsl_noop_complete;
3675     req->dtd_count = 0;
3676    
3677     ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep));
3678     @@ -1366,7 +1372,7 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
3679     req->req.length = 2;
3680     req->req.status = -EINPROGRESS;
3681     req->req.actual = 0;
3682     - req->req.complete = NULL;
3683     + req->req.complete = fsl_noop_complete;
3684     req->dtd_count = 0;
3685    
3686     ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep));
3687     diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
3688     index ed56bf9ed885..abe360684f0b 100644
3689     --- a/drivers/usb/host/xhci-plat.c
3690     +++ b/drivers/usb/host/xhci-plat.c
3691     @@ -223,9 +223,6 @@ static int xhci_plat_probe(struct platform_device *pdev)
3692     if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable"))
3693     xhci->quirks |= XHCI_LPM_SUPPORT;
3694    
3695     - if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
3696     - xhci->shared_hcd->can_do_streams = 1;
3697     -
3698     hcd->usb_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
3699     if (IS_ERR(hcd->usb_phy)) {
3700     ret = PTR_ERR(hcd->usb_phy);
3701     @@ -242,6 +239,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
3702     if (ret)
3703     goto disable_usb_phy;
3704    
3705     + if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
3706     + xhci->shared_hcd->can_do_streams = 1;
3707     +
3708     ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
3709     if (ret)
3710     goto dealloc_usb2_hcd;
3711     diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
3712     index 2440f88e07a3..bacee0fd4dd3 100644
3713     --- a/drivers/usb/musb/da8xx.c
3714     +++ b/drivers/usb/musb/da8xx.c
3715     @@ -434,15 +434,11 @@ static int da8xx_musb_exit(struct musb *musb)
3716     }
3717    
3718     static const struct musb_platform_ops da8xx_ops = {
3719     - .quirks = MUSB_DMA_CPPI | MUSB_INDEXED_EP,
3720     + .quirks = MUSB_INDEXED_EP,
3721     .init = da8xx_musb_init,
3722     .exit = da8xx_musb_exit,
3723    
3724     .fifo_mode = 2,
3725     -#ifdef CONFIG_USB_TI_CPPI_DMA
3726     - .dma_init = cppi_dma_controller_create,
3727     - .dma_exit = cppi_dma_controller_destroy,
3728     -#endif
3729     .enable = da8xx_musb_enable,
3730     .disable = da8xx_musb_disable,
3731    
3732     diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
3733     index 181793f07852..9d2738e9217f 100644
3734     --- a/drivers/virtio/virtio_balloon.c
3735     +++ b/drivers/virtio/virtio_balloon.c
3736     @@ -615,8 +615,12 @@ static void virtballoon_remove(struct virtio_device *vdev)
3737     cancel_work_sync(&vb->update_balloon_stats_work);
3738    
3739     remove_common(vb);
3740     +#ifdef CONFIG_BALLOON_COMPACTION
3741     if (vb->vb_dev_info.inode)
3742     iput(vb->vb_dev_info.inode);
3743     +
3744     + kern_unmount(balloon_mnt);
3745     +#endif
3746     kfree(vb);
3747     }
3748    
3749     diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
3750     index bdbadaa47ef3..0035cf79760a 100644
3751     --- a/drivers/vme/vme.c
3752     +++ b/drivers/vme/vme.c
3753     @@ -1625,10 +1625,25 @@ static int vme_bus_probe(struct device *dev)
3754     return retval;
3755     }
3756    
3757     +static int vme_bus_remove(struct device *dev)
3758     +{
3759     + int retval = -ENODEV;
3760     + struct vme_driver *driver;
3761     + struct vme_dev *vdev = dev_to_vme_dev(dev);
3762     +
3763     + driver = dev->platform_data;
3764     +
3765     + if (driver->remove != NULL)
3766     + retval = driver->remove(vdev);
3767     +
3768     + return retval;
3769     +}
3770     +
3771     struct bus_type vme_bus_type = {
3772     .name = "vme",
3773     .match = vme_bus_match,
3774     .probe = vme_bus_probe,
3775     + .remove = vme_bus_remove,
3776     };
3777     EXPORT_SYMBOL(vme_bus_type);
3778    
3779     diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
3780     index 049a884a756f..59d74d1b47a8 100644
3781     --- a/drivers/w1/masters/ds2490.c
3782     +++ b/drivers/w1/masters/ds2490.c
3783     @@ -153,6 +153,9 @@ struct ds_device
3784     */
3785     u16 spu_bit;
3786    
3787     + u8 st_buf[ST_SIZE];
3788     + u8 byte_buf;
3789     +
3790     struct w1_bus_master master;
3791     };
3792    
3793     @@ -174,7 +177,6 @@ struct ds_status
3794     u8 data_in_buffer_status;
3795     u8 reserved1;
3796     u8 reserved2;
3797     -
3798     };
3799    
3800     static struct usb_device_id ds_id_table [] = {
3801     @@ -244,28 +246,6 @@ static int ds_send_control(struct ds_device *dev, u16 value, u16 index)
3802     return err;
3803     }
3804    
3805     -static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st,
3806     - unsigned char *buf, int size)
3807     -{
3808     - int count, err;
3809     -
3810     - memset(st, 0, sizeof(*st));
3811     -
3812     - count = 0;
3813     - err = usb_interrupt_msg(dev->udev, usb_rcvintpipe(dev->udev,
3814     - dev->ep[EP_STATUS]), buf, size, &count, 1000);
3815     - if (err < 0) {
3816     - pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n",
3817     - dev->ep[EP_STATUS], err);
3818     - return err;
3819     - }
3820     -
3821     - if (count >= sizeof(*st))
3822     - memcpy(st, buf, sizeof(*st));
3823     -
3824     - return count;
3825     -}
3826     -
3827     static inline void ds_print_msg(unsigned char *buf, unsigned char *str, int off)
3828     {
3829     pr_info("%45s: %8x\n", str, buf[off]);
3830     @@ -324,6 +304,35 @@ static void ds_dump_status(struct ds_device *dev, unsigned char *buf, int count)
3831     }
3832     }
3833    
3834     +static int ds_recv_status(struct ds_device *dev, struct ds_status *st,
3835     + bool dump)
3836     +{
3837     + int count, err;
3838     +
3839     + if (st)
3840     + memset(st, 0, sizeof(*st));
3841     +
3842     + count = 0;
3843     + err = usb_interrupt_msg(dev->udev,
3844     + usb_rcvintpipe(dev->udev,
3845     + dev->ep[EP_STATUS]),
3846     + dev->st_buf, sizeof(dev->st_buf),
3847     + &count, 1000);
3848     + if (err < 0) {
3849     + pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n",
3850     + dev->ep[EP_STATUS], err);
3851     + return err;
3852     + }
3853     +
3854     + if (dump)
3855     + ds_dump_status(dev, dev->st_buf, count);
3856     +
3857     + if (st && count >= sizeof(*st))
3858     + memcpy(st, dev->st_buf, sizeof(*st));
3859     +
3860     + return count;
3861     +}
3862     +
3863     static void ds_reset_device(struct ds_device *dev)
3864     {
3865     ds_send_control_cmd(dev, CTL_RESET_DEVICE, 0);
3866     @@ -344,7 +353,6 @@ static void ds_reset_device(struct ds_device *dev)
3867     static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
3868     {
3869     int count, err;
3870     - struct ds_status st;
3871    
3872     /* Careful on size. If size is less than what is available in
3873     * the input buffer, the device fails the bulk transfer and
3874     @@ -359,14 +367,9 @@ static int ds_recv_data(struct ds_device *dev, unsigned char *buf, int size)
3875     err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]),
3876     buf, size, &count, 1000);
3877     if (err < 0) {
3878     - u8 buf[ST_SIZE];
3879     - int count;
3880     -
3881     pr_info("Clearing ep0x%x.\n", dev->ep[EP_DATA_IN]);
3882     usb_clear_halt(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_DATA_IN]));
3883     -
3884     - count = ds_recv_status_nodump(dev, &st, buf, sizeof(buf));
3885     - ds_dump_status(dev, buf, count);
3886     + ds_recv_status(dev, NULL, true);
3887     return err;
3888     }
3889    
3890     @@ -404,7 +407,6 @@ int ds_stop_pulse(struct ds_device *dev, int limit)
3891     {
3892     struct ds_status st;
3893     int count = 0, err = 0;
3894     - u8 buf[ST_SIZE];
3895    
3896     do {
3897     err = ds_send_control(dev, CTL_HALT_EXE_IDLE, 0);
3898     @@ -413,7 +415,7 @@ int ds_stop_pulse(struct ds_device *dev, int limit)
3899     err = ds_send_control(dev, CTL_RESUME_EXE, 0);
3900     if (err)
3901     break;
3902     - err = ds_recv_status_nodump(dev, &st, buf, sizeof(buf));
3903     + err = ds_recv_status(dev, &st, false);
3904     if (err)
3905     break;
3906    
3907     @@ -456,18 +458,17 @@ int ds_detect(struct ds_device *dev, struct ds_status *st)
3908    
3909     static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
3910     {
3911     - u8 buf[ST_SIZE];
3912     int err, count = 0;
3913    
3914     do {
3915     st->status = 0;
3916     - err = ds_recv_status_nodump(dev, st, buf, sizeof(buf));
3917     + err = ds_recv_status(dev, st, false);
3918     #if 0
3919     if (err >= 0) {
3920     int i;
3921     printk("0x%x: count=%d, status: ", dev->ep[EP_STATUS], err);
3922     for (i=0; i<err; ++i)
3923     - printk("%02x ", buf[i]);
3924     + printk("%02x ", dev->st_buf[i]);
3925     printk("\n");
3926     }
3927     #endif
3928     @@ -485,7 +486,7 @@ static int ds_wait_status(struct ds_device *dev, struct ds_status *st)
3929     * can do something with it).
3930     */
3931     if (err > 16 || count >= 100 || err < 0)
3932     - ds_dump_status(dev, buf, err);
3933     + ds_dump_status(dev, dev->st_buf, err);
3934    
3935     /* Extended data isn't an error. Well, a short is, but the dump
3936     * would have already told the user that and we can't do anything
3937     @@ -608,7 +609,6 @@ static int ds_write_byte(struct ds_device *dev, u8 byte)
3938     {
3939     int err;
3940     struct ds_status st;
3941     - u8 rbyte;
3942    
3943     err = ds_send_control(dev, COMM_BYTE_IO | COMM_IM | dev->spu_bit, byte);
3944     if (err)
3945     @@ -621,11 +621,11 @@ static int ds_write_byte(struct ds_device *dev, u8 byte)
3946     if (err)
3947     return err;
3948    
3949     - err = ds_recv_data(dev, &rbyte, sizeof(rbyte));
3950     + err = ds_recv_data(dev, &dev->byte_buf, 1);
3951     if (err < 0)
3952     return err;
3953    
3954     - return !(byte == rbyte);
3955     + return !(byte == dev->byte_buf);
3956     }
3957    
3958     static int ds_read_byte(struct ds_device *dev, u8 *byte)
3959     @@ -712,7 +712,6 @@ static void ds9490r_search(void *data, struct w1_master *master,
3960     int err;
3961     u16 value, index;
3962     struct ds_status st;
3963     - u8 st_buf[ST_SIZE];
3964     int search_limit;
3965     int found = 0;
3966     int i;
3967     @@ -724,7 +723,12 @@ static void ds9490r_search(void *data, struct w1_master *master,
3968     /* FIFO 128 bytes, bulk packet size 64, read a multiple of the
3969     * packet size.
3970     */
3971     - u64 buf[2*64/8];
3972     + const size_t bufsize = 2 * 64;
3973     + u64 *buf;
3974     +
3975     + buf = kmalloc(bufsize, GFP_KERNEL);
3976     + if (!buf)
3977     + return;
3978    
3979     mutex_lock(&master->bus_mutex);
3980    
3981     @@ -745,10 +749,9 @@ static void ds9490r_search(void *data, struct w1_master *master,
3982     do {
3983     schedule_timeout(jtime);
3984    
3985     - if (ds_recv_status_nodump(dev, &st, st_buf, sizeof(st_buf)) <
3986     - sizeof(st)) {
3987     + err = ds_recv_status(dev, &st, false);
3988     + if (err < 0 || err < sizeof(st))
3989     break;
3990     - }
3991    
3992     if (st.data_in_buffer_status) {
3993     /* Bulk in can receive partial ids, but when it does
3994     @@ -758,7 +761,7 @@ static void ds9490r_search(void *data, struct w1_master *master,
3995     * bulk without first checking if status says there
3996     * is data to read.
3997     */
3998     - err = ds_recv_data(dev, (u8 *)buf, sizeof(buf));
3999     + err = ds_recv_data(dev, (u8 *)buf, bufsize);
4000     if (err < 0)
4001     break;
4002     for (i = 0; i < err/8; ++i) {
4003     @@ -794,9 +797,14 @@ static void ds9490r_search(void *data, struct w1_master *master,
4004     }
4005     search_out:
4006     mutex_unlock(&master->bus_mutex);
4007     + kfree(buf);
4008     }
4009    
4010     #if 0
4011     +/*
4012     + * FIXME: if this disabled code is ever used in the future all ds_send_data()
4013     + * calls must be changed to use a DMAable buffer.
4014     + */
4015     static int ds_match_access(struct ds_device *dev, u64 init)
4016     {
4017     int err;
4018     @@ -845,13 +853,12 @@ static int ds_set_path(struct ds_device *dev, u64 init)
4019    
4020     static u8 ds9490r_touch_bit(void *data, u8 bit)
4021     {
4022     - u8 ret;
4023     struct ds_device *dev = data;
4024    
4025     - if (ds_touch_bit(dev, bit, &ret))
4026     + if (ds_touch_bit(dev, bit, &dev->byte_buf))
4027     return 0;
4028    
4029     - return ret;
4030     + return dev->byte_buf;
4031     }
4032    
4033     #if 0
4034     @@ -866,13 +873,12 @@ static u8 ds9490r_read_bit(void *data)
4035     {
4036     struct ds_device *dev = data;
4037     int err;
4038     - u8 bit = 0;
4039    
4040     - err = ds_touch_bit(dev, 1, &bit);
4041     + err = ds_touch_bit(dev, 1, &dev->byte_buf);
4042     if (err)
4043     return 0;
4044    
4045     - return bit & 1;
4046     + return dev->byte_buf & 1;
4047     }
4048     #endif
4049    
4050     @@ -887,32 +893,52 @@ static u8 ds9490r_read_byte(void *data)
4051     {
4052     struct ds_device *dev = data;
4053     int err;
4054     - u8 byte = 0;
4055    
4056     - err = ds_read_byte(dev, &byte);
4057     + err = ds_read_byte(dev, &dev->byte_buf);
4058     if (err)
4059     return 0;
4060    
4061     - return byte;
4062     + return dev->byte_buf;
4063     }
4064    
4065     static void ds9490r_write_block(void *data, const u8 *buf, int len)
4066     {
4067     struct ds_device *dev = data;
4068     + u8 *tbuf;
4069     +
4070     + if (len <= 0)
4071     + return;
4072     +
4073     + tbuf = kmalloc(len, GFP_KERNEL);
4074     + if (!tbuf)
4075     + return;
4076    
4077     - ds_write_block(dev, (u8 *)buf, len);
4078     + memcpy(tbuf, buf, len);
4079     + ds_write_block(dev, tbuf, len);
4080     +
4081     + kfree(tbuf);
4082     }
4083    
4084     static u8 ds9490r_read_block(void *data, u8 *buf, int len)
4085     {
4086     struct ds_device *dev = data;
4087     int err;
4088     + u8 *tbuf;
4089    
4090     - err = ds_read_block(dev, buf, len);
4091     - if (err < 0)
4092     + if (len <= 0)
4093     + return 0;
4094     +
4095     + tbuf = kmalloc(len, GFP_KERNEL);
4096     + if (!tbuf)
4097     return 0;
4098    
4099     - return len;
4100     + err = ds_read_block(dev, tbuf, len);
4101     + if (err >= 0)
4102     + memcpy(buf, tbuf, len);
4103     +
4104     + kfree(tbuf);
4105     +
4106     + return err >= 0 ? len : 0;
4107     }
4108    
4109     static u8 ds9490r_reset(void *data)
4110     diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
4111     index e213c678bbfe..ab0931e7a9bb 100644
4112     --- a/drivers/w1/w1.c
4113     +++ b/drivers/w1/w1.c
4114     @@ -763,6 +763,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
4115     dev_err(&dev->dev, "%s: Attaching %s failed.\n", __func__,
4116     sl->name);
4117     w1_family_put(sl->family);
4118     + atomic_dec(&sl->master->refcnt);
4119     kfree(sl);
4120     return err;
4121     }
4122     diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
4123     index ef3ebd780aff..1e643c718917 100644
4124     --- a/fs/ceph/addr.c
4125     +++ b/fs/ceph/addr.c
4126     @@ -363,6 +363,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
4127     nr_pages = i;
4128     if (nr_pages > 0) {
4129     len = nr_pages << PAGE_SHIFT;
4130     + osd_req_op_extent_update(req, 0, len);
4131     break;
4132     }
4133     goto out_pages;
4134     diff --git a/fs/cifs/file.c b/fs/cifs/file.c
4135     index 18a1e1d6671f..1cd0e2eefc66 100644
4136     --- a/fs/cifs/file.c
4137     +++ b/fs/cifs/file.c
4138     @@ -2884,7 +2884,15 @@ cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
4139     for (i = 0; i < rdata->nr_pages; i++) {
4140     struct page *page = rdata->pages[i];
4141     size_t copy = min_t(size_t, remaining, PAGE_SIZE);
4142     - size_t written = copy_page_to_iter(page, 0, copy, iter);
4143     + size_t written;
4144     +
4145     + if (unlikely(iter->type & ITER_PIPE)) {
4146     + void *addr = kmap_atomic(page);
4147     +
4148     + written = copy_to_iter(addr, copy, iter);
4149     + kunmap_atomic(addr);
4150     + } else
4151     + written = copy_page_to_iter(page, 0, copy, iter);
4152     remaining -= written;
4153     if (written < copy && iov_iter_count(iter) > 0)
4154     break;
4155     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
4156     index c930a0110fb4..9fbf92ca358c 100644
4157     --- a/fs/ext4/extents.c
4158     +++ b/fs/ext4/extents.c
4159     @@ -5344,7 +5344,8 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
4160     ext4_lblk_t stop, *iterator, ex_start, ex_end;
4161    
4162     /* Let path point to the last extent */
4163     - path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0);
4164     + path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
4165     + EXT4_EX_NOCACHE);
4166     if (IS_ERR(path))
4167     return PTR_ERR(path);
4168    
4169     @@ -5353,15 +5354,15 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
4170     if (!extent)
4171     goto out;
4172    
4173     - stop = le32_to_cpu(extent->ee_block) +
4174     - ext4_ext_get_actual_len(extent);
4175     + stop = le32_to_cpu(extent->ee_block);
4176    
4177     /*
4178     * In case of left shift, Don't start shifting extents until we make
4179     * sure the hole is big enough to accommodate the shift.
4180     */
4181     if (SHIFT == SHIFT_LEFT) {
4182     - path = ext4_find_extent(inode, start - 1, &path, 0);
4183     + path = ext4_find_extent(inode, start - 1, &path,
4184     + EXT4_EX_NOCACHE);
4185     if (IS_ERR(path))
4186     return PTR_ERR(path);
4187     depth = path->p_depth;
4188     @@ -5393,9 +5394,14 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
4189     else
4190     iterator = &stop;
4191    
4192     - /* Its safe to start updating extents */
4193     - while (start < stop) {
4194     - path = ext4_find_extent(inode, *iterator, &path, 0);
4195     + /*
4196     + * Its safe to start updating extents. Start and stop are unsigned, so
4197     + * in case of right shift if extent with 0 block is reached, iterator
4198     + * becomes NULL to indicate the end of the loop.
4199     + */
4200     + while (iterator && start <= stop) {
4201     + path = ext4_find_extent(inode, *iterator, &path,
4202     + EXT4_EX_NOCACHE);
4203     if (IS_ERR(path))
4204     return PTR_ERR(path);
4205     depth = path->p_depth;
4206     @@ -5422,8 +5428,11 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
4207     ext4_ext_get_actual_len(extent);
4208     } else {
4209     extent = EXT_FIRST_EXTENT(path[depth].p_hdr);
4210     - *iterator = le32_to_cpu(extent->ee_block) > 0 ?
4211     - le32_to_cpu(extent->ee_block) - 1 : 0;
4212     + if (le32_to_cpu(extent->ee_block) > 0)
4213     + *iterator = le32_to_cpu(extent->ee_block) - 1;
4214     + else
4215     + /* Beginning is reached, end of the loop */
4216     + iterator = NULL;
4217     /* Update path extent in case we need to stop */
4218     while (le32_to_cpu(extent->ee_block) < start)
4219     extent++;
4220     diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
4221     index d8ca4b9f9dd6..37b521ed39df 100644
4222     --- a/fs/ext4/inline.c
4223     +++ b/fs/ext4/inline.c
4224     @@ -376,7 +376,7 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
4225     static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
4226     unsigned int len)
4227     {
4228     - int ret, size;
4229     + int ret, size, no_expand;
4230     struct ext4_inode_info *ei = EXT4_I(inode);
4231    
4232     if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
4233     @@ -386,15 +386,14 @@ static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
4234     if (size < len)
4235     return -ENOSPC;
4236    
4237     - down_write(&EXT4_I(inode)->xattr_sem);
4238     + ext4_write_lock_xattr(inode, &no_expand);
4239    
4240     if (ei->i_inline_off)
4241     ret = ext4_update_inline_data(handle, inode, len);
4242     else
4243     ret = ext4_create_inline_data(handle, inode, len);
4244    
4245     - up_write(&EXT4_I(inode)->xattr_sem);
4246     -
4247     + ext4_write_unlock_xattr(inode, &no_expand);
4248     return ret;
4249     }
4250    
4251     @@ -523,7 +522,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
4252     struct inode *inode,
4253     unsigned flags)
4254     {
4255     - int ret, needed_blocks;
4256     + int ret, needed_blocks, no_expand;
4257     handle_t *handle = NULL;
4258     int retries = 0, sem_held = 0;
4259     struct page *page = NULL;
4260     @@ -563,7 +562,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
4261     goto out;
4262     }
4263    
4264     - down_write(&EXT4_I(inode)->xattr_sem);
4265     + ext4_write_lock_xattr(inode, &no_expand);
4266     sem_held = 1;
4267     /* If some one has already done this for us, just exit. */
4268     if (!ext4_has_inline_data(inode)) {
4269     @@ -600,7 +599,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
4270     put_page(page);
4271     page = NULL;
4272     ext4_orphan_add(handle, inode);
4273     - up_write(&EXT4_I(inode)->xattr_sem);
4274     + ext4_write_unlock_xattr(inode, &no_expand);
4275     sem_held = 0;
4276     ext4_journal_stop(handle);
4277     handle = NULL;
4278     @@ -626,7 +625,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
4279     put_page(page);
4280     }
4281     if (sem_held)
4282     - up_write(&EXT4_I(inode)->xattr_sem);
4283     + ext4_write_unlock_xattr(inode, &no_expand);
4284     if (handle)
4285     ext4_journal_stop(handle);
4286     brelse(iloc.bh);
4287     @@ -719,7 +718,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
4288     int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
4289     unsigned copied, struct page *page)
4290     {
4291     - int ret;
4292     + int ret, no_expand;
4293     void *kaddr;
4294     struct ext4_iloc iloc;
4295    
4296     @@ -737,7 +736,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
4297     goto out;
4298     }
4299    
4300     - down_write(&EXT4_I(inode)->xattr_sem);
4301     + ext4_write_lock_xattr(inode, &no_expand);
4302     BUG_ON(!ext4_has_inline_data(inode));
4303    
4304     kaddr = kmap_atomic(page);
4305     @@ -747,7 +746,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
4306     /* clear page dirty so that writepages wouldn't work for us. */
4307     ClearPageDirty(page);
4308    
4309     - up_write(&EXT4_I(inode)->xattr_sem);
4310     + ext4_write_unlock_xattr(inode, &no_expand);
4311     brelse(iloc.bh);
4312     out:
4313     return copied;
4314     @@ -758,7 +757,7 @@ ext4_journalled_write_inline_data(struct inode *inode,
4315     unsigned len,
4316     struct page *page)
4317     {
4318     - int ret;
4319     + int ret, no_expand;
4320     void *kaddr;
4321     struct ext4_iloc iloc;
4322    
4323     @@ -768,11 +767,11 @@ ext4_journalled_write_inline_data(struct inode *inode,
4324     return NULL;
4325     }
4326    
4327     - down_write(&EXT4_I(inode)->xattr_sem);
4328     + ext4_write_lock_xattr(inode, &no_expand);
4329     kaddr = kmap_atomic(page);
4330     ext4_write_inline_data(inode, &iloc, kaddr, 0, len);
4331     kunmap_atomic(kaddr);
4332     - up_write(&EXT4_I(inode)->xattr_sem);
4333     + ext4_write_unlock_xattr(inode, &no_expand);
4334    
4335     return iloc.bh;
4336     }
4337     @@ -934,8 +933,15 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
4338     struct page *page)
4339     {
4340     int i_size_changed = 0;
4341     + int ret;
4342    
4343     - copied = ext4_write_inline_data_end(inode, pos, len, copied, page);
4344     + ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
4345     + if (ret < 0) {
4346     + unlock_page(page);
4347     + put_page(page);
4348     + return ret;
4349     + }
4350     + copied = ret;
4351    
4352     /*
4353     * No need to use i_size_read() here, the i_size
4354     @@ -1249,7 +1255,7 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
4355     int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
4356     struct inode *dir, struct inode *inode)
4357     {
4358     - int ret, inline_size;
4359     + int ret, inline_size, no_expand;
4360     void *inline_start;
4361     struct ext4_iloc iloc;
4362    
4363     @@ -1257,7 +1263,7 @@ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
4364     if (ret)
4365     return ret;
4366    
4367     - down_write(&EXT4_I(dir)->xattr_sem);
4368     + ext4_write_lock_xattr(dir, &no_expand);
4369     if (!ext4_has_inline_data(dir))
4370     goto out;
4371    
4372     @@ -1303,7 +1309,7 @@ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
4373    
4374     out:
4375     ext4_mark_inode_dirty(handle, dir);
4376     - up_write(&EXT4_I(dir)->xattr_sem);
4377     + ext4_write_unlock_xattr(dir, &no_expand);
4378     brelse(iloc.bh);
4379     return ret;
4380     }
4381     @@ -1663,7 +1669,7 @@ int ext4_delete_inline_entry(handle_t *handle,
4382     struct buffer_head *bh,
4383     int *has_inline_data)
4384     {
4385     - int err, inline_size;
4386     + int err, inline_size, no_expand;
4387     struct ext4_iloc iloc;
4388     void *inline_start;
4389    
4390     @@ -1671,7 +1677,7 @@ int ext4_delete_inline_entry(handle_t *handle,
4391     if (err)
4392     return err;
4393    
4394     - down_write(&EXT4_I(dir)->xattr_sem);
4395     + ext4_write_lock_xattr(dir, &no_expand);
4396     if (!ext4_has_inline_data(dir)) {
4397     *has_inline_data = 0;
4398     goto out;
4399     @@ -1705,7 +1711,7 @@ int ext4_delete_inline_entry(handle_t *handle,
4400    
4401     ext4_show_inline_dir(dir, iloc.bh, inline_start, inline_size);
4402     out:
4403     - up_write(&EXT4_I(dir)->xattr_sem);
4404     + ext4_write_unlock_xattr(dir, &no_expand);
4405     brelse(iloc.bh);
4406     if (err != -ENOENT)
4407     ext4_std_error(dir->i_sb, err);
4408     @@ -1804,11 +1810,11 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
4409    
4410     int ext4_destroy_inline_data(handle_t *handle, struct inode *inode)
4411     {
4412     - int ret;
4413     + int ret, no_expand;
4414    
4415     - down_write(&EXT4_I(inode)->xattr_sem);
4416     + ext4_write_lock_xattr(inode, &no_expand);
4417     ret = ext4_destroy_inline_data_nolock(handle, inode);
4418     - up_write(&EXT4_I(inode)->xattr_sem);
4419     + ext4_write_unlock_xattr(inode, &no_expand);
4420    
4421     return ret;
4422     }
4423     @@ -1893,7 +1899,7 @@ int ext4_try_to_evict_inline_data(handle_t *handle,
4424     void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
4425     {
4426     handle_t *handle;
4427     - int inline_size, value_len, needed_blocks;
4428     + int inline_size, value_len, needed_blocks, no_expand;
4429     size_t i_size;
4430     void *value = NULL;
4431     struct ext4_xattr_ibody_find is = {
4432     @@ -1910,7 +1916,7 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
4433     if (IS_ERR(handle))
4434     return;
4435    
4436     - down_write(&EXT4_I(inode)->xattr_sem);
4437     + ext4_write_lock_xattr(inode, &no_expand);
4438     if (!ext4_has_inline_data(inode)) {
4439     *has_inline = 0;
4440     ext4_journal_stop(handle);
4441     @@ -1968,7 +1974,7 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
4442     up_write(&EXT4_I(inode)->i_data_sem);
4443     out:
4444     brelse(is.iloc.bh);
4445     - up_write(&EXT4_I(inode)->xattr_sem);
4446     + ext4_write_unlock_xattr(inode, &no_expand);
4447     kfree(value);
4448     if (inode->i_nlink)
4449     ext4_orphan_del(handle, inode);
4450     @@ -1984,7 +1990,7 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
4451    
4452     int ext4_convert_inline_data(struct inode *inode)
4453     {
4454     - int error, needed_blocks;
4455     + int error, needed_blocks, no_expand;
4456     handle_t *handle;
4457     struct ext4_iloc iloc;
4458    
4459     @@ -2006,15 +2012,10 @@ int ext4_convert_inline_data(struct inode *inode)
4460     goto out_free;
4461     }
4462    
4463     - down_write(&EXT4_I(inode)->xattr_sem);
4464     - if (!ext4_has_inline_data(inode)) {
4465     - up_write(&EXT4_I(inode)->xattr_sem);
4466     - goto out;
4467     - }
4468     -
4469     - error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
4470     - up_write(&EXT4_I(inode)->xattr_sem);
4471     -out:
4472     + ext4_write_lock_xattr(inode, &no_expand);
4473     + if (ext4_has_inline_data(inode))
4474     + error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
4475     + ext4_write_unlock_xattr(inode, &no_expand);
4476     ext4_journal_stop(handle);
4477     out_free:
4478     brelse(iloc.bh);
4479     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
4480     index 33a509c876ee..1d4f5faa04b5 100644
4481     --- a/fs/ext4/inode.c
4482     +++ b/fs/ext4/inode.c
4483     @@ -1324,8 +1324,11 @@ static int ext4_write_end(struct file *file,
4484     if (ext4_has_inline_data(inode)) {
4485     ret = ext4_write_inline_data_end(inode, pos, len,
4486     copied, page);
4487     - if (ret < 0)
4488     + if (ret < 0) {
4489     + unlock_page(page);
4490     + put_page(page);
4491     goto errout;
4492     + }
4493     copied = ret;
4494     } else
4495     copied = block_write_end(file, mapping, pos,
4496     @@ -1379,7 +1382,9 @@ static int ext4_write_end(struct file *file,
4497     * set the buffer to be dirty, since in data=journalled mode we need
4498     * to call ext4_handle_dirty_metadata() instead.
4499     */
4500     -static void zero_new_buffers(struct page *page, unsigned from, unsigned to)
4501     +static void ext4_journalled_zero_new_buffers(handle_t *handle,
4502     + struct page *page,
4503     + unsigned from, unsigned to)
4504     {
4505     unsigned int block_start = 0, block_end;
4506     struct buffer_head *head, *bh;
4507     @@ -1396,7 +1401,7 @@ static void zero_new_buffers(struct page *page, unsigned from, unsigned to)
4508     size = min(to, block_end) - start;
4509    
4510     zero_user(page, start, size);
4511     - set_buffer_uptodate(bh);
4512     + write_end_fn(handle, bh);
4513     }
4514     clear_buffer_new(bh);
4515     }
4516     @@ -1425,18 +1430,25 @@ static int ext4_journalled_write_end(struct file *file,
4517    
4518     BUG_ON(!ext4_handle_valid(handle));
4519    
4520     - if (ext4_has_inline_data(inode))
4521     - copied = ext4_write_inline_data_end(inode, pos, len,
4522     - copied, page);
4523     - else {
4524     - if (copied < len) {
4525     - if (!PageUptodate(page))
4526     - copied = 0;
4527     - zero_new_buffers(page, from+copied, to);
4528     + if (ext4_has_inline_data(inode)) {
4529     + ret = ext4_write_inline_data_end(inode, pos, len,
4530     + copied, page);
4531     + if (ret < 0) {
4532     + unlock_page(page);
4533     + put_page(page);
4534     + goto errout;
4535     }
4536     -
4537     + copied = ret;
4538     + } else if (unlikely(copied < len) && !PageUptodate(page)) {
4539     + copied = 0;
4540     + ext4_journalled_zero_new_buffers(handle, page, from, to);
4541     + } else {
4542     + if (unlikely(copied < len))
4543     + ext4_journalled_zero_new_buffers(handle, page,
4544     + from + copied, to);
4545     ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
4546     - to, &partial, write_end_fn);
4547     + from + copied, &partial,
4548     + write_end_fn);
4549     if (!partial)
4550     SetPageUptodate(page);
4551     }
4552     @@ -1462,6 +1474,7 @@ static int ext4_journalled_write_end(struct file *file,
4553     */
4554     ext4_orphan_add(handle, inode);
4555    
4556     +errout:
4557     ret2 = ext4_journal_stop(handle);
4558     if (!ret)
4559     ret = ret2;
4560     diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
4561     index 7ae43c59bc79..2e9fc7a61048 100644
4562     --- a/fs/ext4/mballoc.c
4563     +++ b/fs/ext4/mballoc.c
4564     @@ -3123,6 +3123,13 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
4565     if (ar->pright && start + size - 1 >= ar->lright)
4566     size -= start + size - ar->lright;
4567    
4568     + /*
4569     + * Trim allocation request for filesystems with artificially small
4570     + * groups.
4571     + */
4572     + if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
4573     + size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
4574     +
4575     end = start + size;
4576    
4577     /* check we don't cross already preallocated blocks */
4578     diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
4579     index 104f8bfba718..c4a389a6027b 100644
4580     --- a/fs/ext4/namei.c
4581     +++ b/fs/ext4/namei.c
4582     @@ -1616,13 +1616,15 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
4583     !fscrypt_has_permitted_context(dir, inode)) {
4584     int nokey = ext4_encrypted_inode(inode) &&
4585     !fscrypt_has_encryption_key(inode);
4586     - iput(inode);
4587     - if (nokey)
4588     + if (nokey) {
4589     + iput(inode);
4590     return ERR_PTR(-ENOKEY);
4591     + }
4592     ext4_warning(inode->i_sb,
4593     "Inconsistent encryption contexts: %lu/%lu",
4594     (unsigned long) dir->i_ino,
4595     (unsigned long) inode->i_ino);
4596     + iput(inode);
4597     return ERR_PTR(-EPERM);
4598     }
4599     }
4600     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
4601     index bbc316db9495..afe29ba42a4e 100644
4602     --- a/fs/ext4/super.c
4603     +++ b/fs/ext4/super.c
4604     @@ -825,6 +825,7 @@ static void ext4_put_super(struct super_block *sb)
4605     {
4606     struct ext4_sb_info *sbi = EXT4_SB(sb);
4607     struct ext4_super_block *es = sbi->s_es;
4608     + int aborted = 0;
4609     int i, err;
4610    
4611     ext4_unregister_li_request(sb);
4612     @@ -834,9 +835,10 @@ static void ext4_put_super(struct super_block *sb)
4613     destroy_workqueue(sbi->rsv_conversion_wq);
4614    
4615     if (sbi->s_journal) {
4616     + aborted = is_journal_aborted(sbi->s_journal);
4617     err = jbd2_journal_destroy(sbi->s_journal);
4618     sbi->s_journal = NULL;
4619     - if (err < 0)
4620     + if ((err < 0) && !aborted)
4621     ext4_abort(sb, "Couldn't clean up the journal");
4622     }
4623    
4624     @@ -847,7 +849,7 @@ static void ext4_put_super(struct super_block *sb)
4625     ext4_mb_release(sb);
4626     ext4_ext_release(sb);
4627    
4628     - if (!(sb->s_flags & MS_RDONLY)) {
4629     + if (!(sb->s_flags & MS_RDONLY) && !aborted) {
4630     ext4_clear_feature_journal_needs_recovery(sb);
4631     es->s_state = cpu_to_le16(sbi->s_mount_state);
4632     }
4633     @@ -3911,7 +3913,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
4634     * root first: it may be modified in the journal!
4635     */
4636     if (!test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb)) {
4637     - if (ext4_load_journal(sb, es, journal_devnum))
4638     + err = ext4_load_journal(sb, es, journal_devnum);
4639     + if (err)
4640     goto failed_mount3a;
4641     } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
4642     ext4_has_feature_journal_needs_recovery(sb)) {
4643     diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
4644     index d77be9e9f535..4448ed37181b 100644
4645     --- a/fs/ext4/xattr.c
4646     +++ b/fs/ext4/xattr.c
4647     @@ -1174,16 +1174,14 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
4648     struct ext4_xattr_block_find bs = {
4649     .s = { .not_found = -ENODATA, },
4650     };
4651     - unsigned long no_expand;
4652     + int no_expand;
4653     int error;
4654    
4655     if (!name)
4656     return -EINVAL;
4657     if (strlen(name) > 255)
4658     return -ERANGE;
4659     - down_write(&EXT4_I(inode)->xattr_sem);
4660     - no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
4661     - ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
4662     + ext4_write_lock_xattr(inode, &no_expand);
4663    
4664     error = ext4_reserve_inode_write(handle, inode, &is.iloc);
4665     if (error)
4666     @@ -1251,7 +1249,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
4667     ext4_xattr_update_super_block(handle, inode->i_sb);
4668     inode->i_ctime = ext4_current_time(inode);
4669     if (!value)
4670     - ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
4671     + no_expand = 0;
4672     error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
4673     /*
4674     * The bh is consumed by ext4_mark_iloc_dirty, even with
4675     @@ -1265,9 +1263,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
4676     cleanup:
4677     brelse(is.iloc.bh);
4678     brelse(bs.bh);
4679     - if (no_expand == 0)
4680     - ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
4681     - up_write(&EXT4_I(inode)->xattr_sem);
4682     + ext4_write_unlock_xattr(inode, &no_expand);
4683     return error;
4684     }
4685    
4686     @@ -1484,12 +1480,11 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
4687     int error = 0, tried_min_extra_isize = 0;
4688     int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
4689     int isize_diff; /* How much do we need to grow i_extra_isize */
4690     + int no_expand;
4691     +
4692     + if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
4693     + return 0;
4694    
4695     - down_write(&EXT4_I(inode)->xattr_sem);
4696     - /*
4697     - * Set EXT4_STATE_NO_EXPAND to avoid recursion when marking inode dirty
4698     - */
4699     - ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
4700     retry:
4701     isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
4702     if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
4703     @@ -1571,17 +1566,16 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
4704     EXT4_I(inode)->i_extra_isize = new_extra_isize;
4705     brelse(bh);
4706     out:
4707     - ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
4708     - up_write(&EXT4_I(inode)->xattr_sem);
4709     + ext4_write_unlock_xattr(inode, &no_expand);
4710     return 0;
4711    
4712     cleanup:
4713     brelse(bh);
4714     /*
4715     - * We deliberately leave EXT4_STATE_NO_EXPAND set here since inode
4716     - * size expansion failed.
4717     + * Inode size expansion failed; don't try again
4718     */
4719     - up_write(&EXT4_I(inode)->xattr_sem);
4720     + no_expand = 1;
4721     + ext4_write_unlock_xattr(inode, &no_expand);
4722     return error;
4723     }
4724    
4725     diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
4726     index a92e783fa057..099c8b670ef5 100644
4727     --- a/fs/ext4/xattr.h
4728     +++ b/fs/ext4/xattr.h
4729     @@ -102,6 +102,38 @@ extern const struct xattr_handler ext4_xattr_security_handler;
4730    
4731     #define EXT4_XATTR_NAME_ENCRYPTION_CONTEXT "c"
4732    
4733     +/*
4734     + * The EXT4_STATE_NO_EXPAND is overloaded and used for two purposes.
4735     + * The first is to signal that there the inline xattrs and data are
4736     + * taking up so much space that we might as well not keep trying to
4737     + * expand it. The second is that xattr_sem is taken for writing, so
4738     + * we shouldn't try to recurse into the inode expansion. For this
4739     + * second case, we need to make sure that we take save and restore the
4740     + * NO_EXPAND state flag appropriately.
4741     + */
4742     +static inline void ext4_write_lock_xattr(struct inode *inode, int *save)
4743     +{
4744     + down_write(&EXT4_I(inode)->xattr_sem);
4745     + *save = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
4746     + ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
4747     +}
4748     +
4749     +static inline int ext4_write_trylock_xattr(struct inode *inode, int *save)
4750     +{
4751     + if (down_write_trylock(&EXT4_I(inode)->xattr_sem) == 0)
4752     + return 0;
4753     + *save = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
4754     + ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
4755     + return 1;
4756     +}
4757     +
4758     +static inline void ext4_write_unlock_xattr(struct inode *inode, int *save)
4759     +{
4760     + if (*save == 0)
4761     + ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
4762     + up_write(&EXT4_I(inode)->xattr_sem);
4763     +}
4764     +
4765     extern ssize_t ext4_listxattr(struct dentry *, char *, size_t);
4766    
4767     extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t);
4768     diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
4769     index 369f4513be37..ebdc90fc71b7 100644
4770     --- a/fs/f2fs/dir.c
4771     +++ b/fs/f2fs/dir.c
4772     @@ -207,9 +207,13 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
4773     f2fs_put_page(dentry_page, 0);
4774     }
4775    
4776     - if (!de && room && F2FS_I(dir)->chash != namehash) {
4777     - F2FS_I(dir)->chash = namehash;
4778     - F2FS_I(dir)->clevel = level;
4779     + /* This is to increase the speed of f2fs_create */
4780     + if (!de && room) {
4781     + F2FS_I(dir)->task = current;
4782     + if (F2FS_I(dir)->chash != namehash) {
4783     + F2FS_I(dir)->chash = namehash;
4784     + F2FS_I(dir)->clevel = level;
4785     + }
4786     }
4787    
4788     return de;
4789     @@ -643,14 +647,34 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
4790     struct inode *inode, nid_t ino, umode_t mode)
4791     {
4792     struct fscrypt_name fname;
4793     + struct page *page = NULL;
4794     + struct f2fs_dir_entry *de = NULL;
4795     int err;
4796    
4797     err = fscrypt_setup_filename(dir, name, 0, &fname);
4798     if (err)
4799     return err;
4800    
4801     - err = __f2fs_do_add_link(dir, &fname, inode, ino, mode);
4802     -
4803     + /*
4804     + * An immature stakable filesystem shows a race condition between lookup
4805     + * and create. If we have same task when doing lookup and create, it's
4806     + * definitely fine as expected by VFS normally. Otherwise, let's just
4807     + * verify on-disk dentry one more time, which guarantees filesystem
4808     + * consistency more.
4809     + */
4810     + if (current != F2FS_I(dir)->task) {
4811     + de = __f2fs_find_entry(dir, &fname, &page);
4812     + F2FS_I(dir)->task = NULL;
4813     + }
4814     + if (de) {
4815     + f2fs_dentry_kunmap(dir, page);
4816     + f2fs_put_page(page, 0);
4817     + err = -EEXIST;
4818     + } else if (IS_ERR(page)) {
4819     + err = PTR_ERR(page);
4820     + } else {
4821     + err = __f2fs_do_add_link(dir, &fname, inode, ino, mode);
4822     + }
4823     fscrypt_free_filename(&fname);
4824     return err;
4825     }
4826     diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
4827     index 2b06d4fcd954..7b32ce979fe1 100644
4828     --- a/fs/f2fs/extent_cache.c
4829     +++ b/fs/f2fs/extent_cache.c
4830     @@ -352,11 +352,12 @@ static struct extent_node *__try_merge_extent_node(struct inode *inode,
4831     }
4832    
4833     if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
4834     - if (en)
4835     - __release_extent_node(sbi, et, prev_ex);
4836     next_ex->ei.fofs = ei->fofs;
4837     next_ex->ei.blk = ei->blk;
4838     next_ex->ei.len += ei->len;
4839     + if (en)
4840     + __release_extent_node(sbi, et, prev_ex);
4841     +
4842     en = next_ex;
4843     }
4844    
4845     diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
4846     index 506af456412f..3a1640be7ffc 100644
4847     --- a/fs/f2fs/f2fs.h
4848     +++ b/fs/f2fs/f2fs.h
4849     @@ -431,6 +431,7 @@ struct f2fs_inode_info {
4850     atomic_t dirty_pages; /* # of dirty pages */
4851     f2fs_hash_t chash; /* hash value of given file name */
4852     unsigned int clevel; /* maximum level of given file name */
4853     + struct task_struct *task; /* lookup and create consistency */
4854     nid_t i_xattr_nid; /* node id that contains xattrs */
4855     unsigned long long xattr_ver; /* cp version of xattr modification */
4856     loff_t last_disk_size; /* lastly written file size */
4857     @@ -833,6 +834,9 @@ struct f2fs_sb_info {
4858     struct f2fs_gc_kthread *gc_thread; /* GC thread */
4859     unsigned int cur_victim_sec; /* current victim section num */
4860    
4861     + /* threshold for converting bg victims for fg */
4862     + u64 fggc_threshold;
4863     +
4864     /* maximum # of trials to find a victim segment for SSR and GC */
4865     unsigned int max_victim_search;
4866    
4867     diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
4868     index 6f14ee923acd..34a69e7ed90b 100644
4869     --- a/fs/f2fs/gc.c
4870     +++ b/fs/f2fs/gc.c
4871     @@ -166,7 +166,8 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
4872     p->ofs_unit = sbi->segs_per_sec;
4873     }
4874    
4875     - if (p->max_search > sbi->max_victim_search)
4876     + /* we need to check every dirty segments in the FG_GC case */
4877     + if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
4878     p->max_search = sbi->max_victim_search;
4879    
4880     p->offset = sbi->last_victim[p->gc_mode];
4881     @@ -199,6 +200,10 @@ static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
4882     for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
4883     if (sec_usage_check(sbi, secno))
4884     continue;
4885     +
4886     + if (no_fggc_candidate(sbi, secno))
4887     + continue;
4888     +
4889     clear_bit(secno, dirty_i->victim_secmap);
4890     return secno * sbi->segs_per_sec;
4891     }
4892     @@ -322,13 +327,15 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
4893     nsearched++;
4894     }
4895    
4896     -
4897     secno = GET_SECNO(sbi, segno);
4898    
4899     if (sec_usage_check(sbi, secno))
4900     goto next;
4901     if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
4902     goto next;
4903     + if (gc_type == FG_GC && p.alloc_mode == LFS &&
4904     + no_fggc_candidate(sbi, secno))
4905     + goto next;
4906    
4907     cost = get_gc_cost(sbi, segno, &p);
4908    
4909     @@ -972,5 +979,16 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
4910    
4911     void build_gc_manager(struct f2fs_sb_info *sbi)
4912     {
4913     + u64 main_count, resv_count, ovp_count, blocks_per_sec;
4914     +
4915     DIRTY_I(sbi)->v_ops = &default_v_ops;
4916     +
4917     + /* threshold of # of valid blocks in a section for victims of FG_GC */
4918     + main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg;
4919     + resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg;
4920     + ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
4921     + blocks_per_sec = sbi->blocks_per_seg * sbi->segs_per_sec;
4922     +
4923     + sbi->fggc_threshold = div_u64((main_count - ovp_count) * blocks_per_sec,
4924     + (main_count - resv_count));
4925     }
4926     diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
4927     index fc886f008449..a7943f861d68 100644
4928     --- a/fs/f2fs/segment.c
4929     +++ b/fs/f2fs/segment.c
4930     @@ -813,6 +813,8 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
4931     start = start_segno + sbi->segs_per_sec;
4932     if (start < end)
4933     goto next;
4934     + else
4935     + end = start - 1;
4936     }
4937     mutex_unlock(&dirty_i->seglist_lock);
4938    
4939     diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
4940     index fecb856ad874..b164f8339281 100644
4941     --- a/fs/f2fs/segment.h
4942     +++ b/fs/f2fs/segment.h
4943     @@ -688,6 +688,15 @@ static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
4944     - (base + 1) + type;
4945     }
4946    
4947     +static inline bool no_fggc_candidate(struct f2fs_sb_info *sbi,
4948     + unsigned int secno)
4949     +{
4950     + if (get_valid_blocks(sbi, secno, sbi->segs_per_sec) >=
4951     + sbi->fggc_threshold)
4952     + return true;
4953     + return false;
4954     +}
4955     +
4956     static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
4957     {
4958     if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
4959     diff --git a/fs/fuse/file.c b/fs/fuse/file.c
4960     index 2401c5dabb2a..5ec5870e423a 100644
4961     --- a/fs/fuse/file.c
4962     +++ b/fs/fuse/file.c
4963     @@ -100,6 +100,7 @@ static void fuse_file_put(struct fuse_file *ff, bool sync)
4964     iput(req->misc.release.inode);
4965     fuse_put_request(ff->fc, req);
4966     } else if (sync) {
4967     + __set_bit(FR_FORCE, &req->flags);
4968     __clear_bit(FR_BACKGROUND, &req->flags);
4969     fuse_request_send(ff->fc, req);
4970     iput(req->misc.release.inode);
4971     diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
4972     index 14cbf60167a7..133f322573b5 100644
4973     --- a/fs/gfs2/glock.c
4974     +++ b/fs/gfs2/glock.c
4975     @@ -658,9 +658,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
4976     struct kmem_cache *cachep;
4977     int ret, tries = 0;
4978    
4979     + rcu_read_lock();
4980     gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
4981     if (gl && !lockref_get_not_dead(&gl->gl_lockref))
4982     gl = NULL;
4983     + rcu_read_unlock();
4984    
4985     *glp = gl;
4986     if (gl)
4987     @@ -728,15 +730,18 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
4988    
4989     if (ret == -EEXIST) {
4990     ret = 0;
4991     + rcu_read_lock();
4992     tmp = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
4993     if (tmp == NULL || !lockref_get_not_dead(&tmp->gl_lockref)) {
4994     if (++tries < 100) {
4995     + rcu_read_unlock();
4996     cond_resched();
4997     goto again;
4998     }
4999     tmp = NULL;
5000     ret = -ENOMEM;
5001     }
5002     + rcu_read_unlock();
5003     } else {
5004     WARN_ON_ONCE(ret);
5005     }
5006     diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
5007     index e1652665bd93..5e659ee08d6a 100644
5008     --- a/fs/jbd2/transaction.c
5009     +++ b/fs/jbd2/transaction.c
5010     @@ -1863,7 +1863,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
5011    
5012     __blist_del_buffer(list, jh);
5013     jh->b_jlist = BJ_None;
5014     - if (test_clear_buffer_jbddirty(bh))
5015     + if (transaction && is_journal_aborted(transaction->t_journal))
5016     + clear_buffer_jbddirty(bh);
5017     + else if (test_clear_buffer_jbddirty(bh))
5018     mark_buffer_dirty(bh); /* Expose it to the VM */
5019     }
5020    
5021     diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
5022     index a5c38889e7ae..13abd608af0f 100644
5023     --- a/fs/nfs/flexfilelayout/flexfilelayout.c
5024     +++ b/fs/nfs/flexfilelayout/flexfilelayout.c
5025     @@ -1073,9 +1073,6 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
5026     struct nfs_client *mds_client = mds_server->nfs_client;
5027     struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
5028    
5029     - if (task->tk_status >= 0)
5030     - return 0;
5031     -
5032     switch (task->tk_status) {
5033     /* MDS state errors */
5034     case -NFS4ERR_DELEG_REVOKED:
5035     @@ -1176,9 +1173,6 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
5036     {
5037     struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
5038    
5039     - if (task->tk_status >= 0)
5040     - return 0;
5041     -
5042     switch (task->tk_status) {
5043     /* File access problems. Don't mark the device as unavailable */
5044     case -EACCES:
5045     @@ -1213,6 +1207,13 @@ static int ff_layout_async_handle_error(struct rpc_task *task,
5046     {
5047     int vers = clp->cl_nfs_mod->rpc_vers->number;
5048    
5049     + if (task->tk_status >= 0)
5050     + return 0;
5051     +
5052     + /* Handle the case of an invalid layout segment */
5053     + if (!pnfs_is_valid_lseg(lseg))
5054     + return -NFS4ERR_RESET_TO_PNFS;
5055     +
5056     switch (vers) {
5057     case 3:
5058     return ff_layout_async_handle_error_v3(task, lseg, idx);
5059     diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
5060     index 608501971fe0..5cda392028ce 100644
5061     --- a/fs/nfs/nfs42proc.c
5062     +++ b/fs/nfs/nfs42proc.c
5063     @@ -128,30 +128,26 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
5064     return err;
5065     }
5066    
5067     -static ssize_t _nfs42_proc_copy(struct file *src, loff_t pos_src,
5068     +static ssize_t _nfs42_proc_copy(struct file *src,
5069     struct nfs_lock_context *src_lock,
5070     - struct file *dst, loff_t pos_dst,
5071     + struct file *dst,
5072     struct nfs_lock_context *dst_lock,
5073     - size_t count)
5074     + struct nfs42_copy_args *args,
5075     + struct nfs42_copy_res *res)
5076     {
5077     - struct nfs42_copy_args args = {
5078     - .src_fh = NFS_FH(file_inode(src)),
5079     - .src_pos = pos_src,
5080     - .dst_fh = NFS_FH(file_inode(dst)),
5081     - .dst_pos = pos_dst,
5082     - .count = count,
5083     - };
5084     - struct nfs42_copy_res res;
5085     struct rpc_message msg = {
5086     .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
5087     - .rpc_argp = &args,
5088     - .rpc_resp = &res,
5089     + .rpc_argp = args,
5090     + .rpc_resp = res,
5091     };
5092     struct inode *dst_inode = file_inode(dst);
5093     struct nfs_server *server = NFS_SERVER(dst_inode);
5094     + loff_t pos_src = args->src_pos;
5095     + loff_t pos_dst = args->dst_pos;
5096     + size_t count = args->count;
5097     int status;
5098    
5099     - status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
5100     + status = nfs4_set_rw_stateid(&args->src_stateid, src_lock->open_context,
5101     src_lock, FMODE_READ);
5102     if (status)
5103     return status;
5104     @@ -161,7 +157,7 @@ static ssize_t _nfs42_proc_copy(struct file *src, loff_t pos_src,
5105     if (status)
5106     return status;
5107    
5108     - status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
5109     + status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
5110     dst_lock, FMODE_WRITE);
5111     if (status)
5112     return status;
5113     @@ -171,22 +167,22 @@ static ssize_t _nfs42_proc_copy(struct file *src, loff_t pos_src,
5114     return status;
5115    
5116     status = nfs4_call_sync(server->client, server, &msg,
5117     - &args.seq_args, &res.seq_res, 0);
5118     + &args->seq_args, &res->seq_res, 0);
5119     if (status == -ENOTSUPP)
5120     server->caps &= ~NFS_CAP_COPY;
5121     if (status)
5122     return status;
5123    
5124     - if (res.write_res.verifier.committed != NFS_FILE_SYNC) {
5125     - status = nfs_commit_file(dst, &res.write_res.verifier.verifier);
5126     + if (res->write_res.verifier.committed != NFS_FILE_SYNC) {
5127     + status = nfs_commit_file(dst, &res->write_res.verifier.verifier);
5128     if (status)
5129     return status;
5130     }
5131    
5132     truncate_pagecache_range(dst_inode, pos_dst,
5133     - pos_dst + res.write_res.count);
5134     + pos_dst + res->write_res.count);
5135    
5136     - return res.write_res.count;
5137     + return res->write_res.count;
5138     }
5139    
5140     ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
5141     @@ -196,8 +192,22 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
5142     struct nfs_server *server = NFS_SERVER(file_inode(dst));
5143     struct nfs_lock_context *src_lock;
5144     struct nfs_lock_context *dst_lock;
5145     - struct nfs4_exception src_exception = { };
5146     - struct nfs4_exception dst_exception = { };
5147     + struct nfs42_copy_args args = {
5148     + .src_fh = NFS_FH(file_inode(src)),
5149     + .src_pos = pos_src,
5150     + .dst_fh = NFS_FH(file_inode(dst)),
5151     + .dst_pos = pos_dst,
5152     + .count = count,
5153     + };
5154     + struct nfs42_copy_res res;
5155     + struct nfs4_exception src_exception = {
5156     + .inode = file_inode(src),
5157     + .stateid = &args.src_stateid,
5158     + };
5159     + struct nfs4_exception dst_exception = {
5160     + .inode = file_inode(dst),
5161     + .stateid = &args.dst_stateid,
5162     + };
5163     ssize_t err, err2;
5164    
5165     if (!nfs_server_capable(file_inode(dst), NFS_CAP_COPY))
5166     @@ -207,7 +217,6 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
5167     if (IS_ERR(src_lock))
5168     return PTR_ERR(src_lock);
5169    
5170     - src_exception.inode = file_inode(src);
5171     src_exception.state = src_lock->open_context->state;
5172    
5173     dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
5174     @@ -216,15 +225,17 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
5175     goto out_put_src_lock;
5176     }
5177    
5178     - dst_exception.inode = file_inode(dst);
5179     dst_exception.state = dst_lock->open_context->state;
5180    
5181     do {
5182     inode_lock(file_inode(dst));
5183     - err = _nfs42_proc_copy(src, pos_src, src_lock,
5184     - dst, pos_dst, dst_lock, count);
5185     + err = _nfs42_proc_copy(src, src_lock,
5186     + dst, dst_lock,
5187     + &args, &res);
5188     inode_unlock(file_inode(dst));
5189    
5190     + if (err >= 0)
5191     + break;
5192     if (err == -ENOTSUPP) {
5193     err = -EOPNOTSUPP;
5194     break;
5195     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
5196     index 78ff8b63d5f7..609840de31d3 100644
5197     --- a/fs/nfs/nfs4proc.c
5198     +++ b/fs/nfs/nfs4proc.c
5199     @@ -2708,6 +2708,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
5200     ret = PTR_ERR(state);
5201     if (IS_ERR(state))
5202     goto out;
5203     + ctx->state = state;
5204     if (server->caps & NFS_CAP_POSIX_LOCK)
5205     set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
5206     if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK)
5207     @@ -2733,7 +2734,6 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
5208     if (ret != 0)
5209     goto out;
5210    
5211     - ctx->state = state;
5212     if (d_inode(dentry) == state->inode) {
5213     nfs_inode_attach_open_context(ctx);
5214     if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
5215     @@ -4990,7 +4990,7 @@ static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size
5216     */
5217     static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
5218     {
5219     - struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
5220     + struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, };
5221     struct nfs_getaclargs args = {
5222     .fh = NFS_FH(inode),
5223     .acl_pages = pages,
5224     @@ -5004,13 +5004,9 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
5225     .rpc_argp = &args,
5226     .rpc_resp = &res,
5227     };
5228     - unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
5229     + unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
5230     int ret = -ENOMEM, i;
5231    
5232     - /* As long as we're doing a round trip to the server anyway,
5233     - * let's be prepared for a page of acl data. */
5234     - if (npages == 0)
5235     - npages = 1;
5236     if (npages > ARRAY_SIZE(pages))
5237     return -ERANGE;
5238    
5239     diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
5240     index fc89e5ed07ee..c9c4d9855976 100644
5241     --- a/fs/nfs/nfs4xdr.c
5242     +++ b/fs/nfs/nfs4xdr.c
5243     @@ -2492,7 +2492,7 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
5244     encode_compound_hdr(xdr, req, &hdr);
5245     encode_sequence(xdr, &args->seq_args, &hdr);
5246     encode_putfh(xdr, args->fh, &hdr);
5247     - replen = hdr.replen + op_decode_hdr_maxsz + 1;
5248     + replen = hdr.replen + op_decode_hdr_maxsz;
5249     encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
5250    
5251     xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
5252     diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
5253     index 8ca642fe9b21..b829cc9a9b39 100644
5254     --- a/fs/nfsd/vfs.c
5255     +++ b/fs/nfsd/vfs.c
5256     @@ -377,7 +377,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
5257     __be32 err;
5258     int host_err;
5259     bool get_write_count;
5260     - int size_change = 0;
5261     + bool size_change = (iap->ia_valid & ATTR_SIZE);
5262    
5263     if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
5264     accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
5265     @@ -390,11 +390,11 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
5266     /* Get inode */
5267     err = fh_verify(rqstp, fhp, ftype, accmode);
5268     if (err)
5269     - goto out;
5270     + return err;
5271     if (get_write_count) {
5272     host_err = fh_want_write(fhp);
5273     if (host_err)
5274     - return nfserrno(host_err);
5275     + goto out;
5276     }
5277    
5278     dentry = fhp->fh_dentry;
5279     @@ -405,20 +405,28 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
5280     iap->ia_valid &= ~ATTR_MODE;
5281    
5282     if (!iap->ia_valid)
5283     - goto out;
5284     + return 0;
5285    
5286     nfsd_sanitize_attrs(inode, iap);
5287    
5288     + if (check_guard && guardtime != inode->i_ctime.tv_sec)
5289     + return nfserr_notsync;
5290     +
5291     /*
5292     * The size case is special, it changes the file in addition to the
5293     - * attributes.
5294     + * attributes, and file systems don't expect it to be mixed with
5295     + * "random" attribute changes. We thus split out the size change
5296     + * into a separate call to ->setattr, and do the rest as a separate
5297     + * setattr call.
5298     */
5299     - if (iap->ia_valid & ATTR_SIZE) {
5300     + if (size_change) {
5301     err = nfsd_get_write_access(rqstp, fhp, iap);
5302     if (err)
5303     - goto out;
5304     - size_change = 1;
5305     + return err;
5306     + }
5307    
5308     + fh_lock(fhp);
5309     + if (size_change) {
5310     /*
5311     * RFC5661, Section 18.30.4:
5312     * Changing the size of a file with SETATTR indirectly
5313     @@ -426,29 +434,36 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
5314     *
5315     * (and similar for the older RFCs)
5316     */
5317     - if (iap->ia_size != i_size_read(inode))
5318     - iap->ia_valid |= ATTR_MTIME;
5319     - }
5320     + struct iattr size_attr = {
5321     + .ia_valid = ATTR_SIZE | ATTR_CTIME | ATTR_MTIME,
5322     + .ia_size = iap->ia_size,
5323     + };
5324    
5325     - iap->ia_valid |= ATTR_CTIME;
5326     + host_err = notify_change(dentry, &size_attr, NULL);
5327     + if (host_err)
5328     + goto out_unlock;
5329     + iap->ia_valid &= ~ATTR_SIZE;
5330    
5331     - if (check_guard && guardtime != inode->i_ctime.tv_sec) {
5332     - err = nfserr_notsync;
5333     - goto out_put_write_access;
5334     + /*
5335     + * Avoid the additional setattr call below if the only other
5336     + * attribute that the client sends is the mtime, as we update
5337     + * it as part of the size change above.
5338     + */
5339     + if ((iap->ia_valid & ~ATTR_MTIME) == 0)
5340     + goto out_unlock;
5341     }
5342    
5343     - fh_lock(fhp);
5344     + iap->ia_valid |= ATTR_CTIME;
5345     host_err = notify_change(dentry, iap, NULL);
5346     - fh_unlock(fhp);
5347     - err = nfserrno(host_err);
5348    
5349     -out_put_write_access:
5350     +out_unlock:
5351     + fh_unlock(fhp);
5352     if (size_change)
5353     put_write_access(inode);
5354     - if (!err)
5355     - err = nfserrno(commit_metadata(fhp));
5356     out:
5357     - return err;
5358     + if (!host_err)
5359     + host_err = commit_metadata(fhp);
5360     + return nfserrno(host_err);
5361     }
5362    
5363     #if defined(CONFIG_NFSD_V4)
5364     diff --git a/include/linux/compat.h b/include/linux/compat.h
5365     index 63609398ef9f..d8535a430caf 100644
5366     --- a/include/linux/compat.h
5367     +++ b/include/linux/compat.h
5368     @@ -711,8 +711,10 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long);
5369     compat_stack_t __user *__uss = uss; \
5370     struct task_struct *t = current; \
5371     put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \
5372     - put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
5373     + put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
5374     put_user_ex(t->sas_ss_size, &__uss->ss_size); \
5375     + if (t->sas_ss_flags & SS_AUTODISARM) \
5376     + sas_ss_reset(t); \
5377     } while (0);
5378    
5379     asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
5380     diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
5381     index 2de4e2eea180..e0acb0e5243b 100644
5382     --- a/include/linux/devfreq.h
5383     +++ b/include/linux/devfreq.h
5384     @@ -104,6 +104,8 @@ struct devfreq_dev_profile {
5385     * struct devfreq_governor - Devfreq policy governor
5386     * @node: list node - contains registered devfreq governors
5387     * @name: Governor's name
5388     + * @immutable: Immutable flag for governor. If the value is 1,
5389     + * this govenror is never changeable to other governor.
5390     * @get_target_freq: Returns desired operating frequency for the device.
5391     * Basically, get_target_freq will run
5392     * devfreq_dev_profile.get_dev_status() to get the
5393     @@ -121,6 +123,7 @@ struct devfreq_governor {
5394     struct list_head node;
5395    
5396     const char name[DEVFREQ_NAME_LEN];
5397     + const unsigned int immutable;
5398     int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
5399     int (*event_handler)(struct devfreq *devfreq,
5400     unsigned int event, void *data);
5401     diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
5402     index 3f9778cbc79d..c332f0a45607 100644
5403     --- a/include/linux/fsl_ifc.h
5404     +++ b/include/linux/fsl_ifc.h
5405     @@ -733,8 +733,12 @@ struct fsl_ifc_nand {
5406     __be32 nand_erattr1;
5407     u32 res19[0x10];
5408     __be32 nand_fsr;
5409     - u32 res20[0x3];
5410     - __be32 nand_eccstat[6];
5411     + u32 res20;
5412     + /* The V1 nand_eccstat is actually 4 words that overlaps the
5413     + * V2 nand_eccstat.
5414     + */
5415     + __be32 v1_nand_eccstat[2];
5416     + __be32 v2_nand_eccstat[6];
5417     u32 res21[0x1c];
5418     __be32 nanndcr;
5419     u32 res22[0x2];
5420     diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
5421     index c92a083bcf16..192eef2fd766 100644
5422     --- a/include/linux/hyperv.h
5423     +++ b/include/linux/hyperv.h
5424     @@ -641,6 +641,7 @@ struct vmbus_channel_msginfo {
5425    
5426     /* Synchronize the request/response if needed */
5427     struct completion waitevent;
5428     + struct vmbus_channel *waiting_channel;
5429     union {
5430     struct vmbus_channel_version_supported version_supported;
5431     struct vmbus_channel_open_result open_result;
5432     diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
5433     index d49e26c6cdc7..23e129ef6726 100644
5434     --- a/include/linux/intel-iommu.h
5435     +++ b/include/linux/intel-iommu.h
5436     @@ -153,8 +153,8 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
5437     #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
5438     #define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
5439     #define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
5440     -#define DMA_TLB_IIRG(type) ((type >> 60) & 7)
5441     -#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
5442     +#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
5443     +#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
5444     #define DMA_TLB_READ_DRAIN (((u64)1) << 49)
5445     #define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
5446     #define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
5447     @@ -164,9 +164,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
5448    
5449     /* INVALID_DESC */
5450     #define DMA_CCMD_INVL_GRANU_OFFSET 61
5451     -#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3)
5452     -#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3)
5453     -#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3)
5454     +#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4)
5455     +#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4)
5456     +#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4)
5457     #define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
5458     #define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
5459     #define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
5460     @@ -316,8 +316,8 @@ enum {
5461     #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
5462     #define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
5463     #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
5464     -#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
5465     -#define QI_DEV_EIOTLB_QDEP(qd) (((qd) & 0x1f) << 16)
5466     +#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
5467     +#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
5468     #define QI_DEV_EIOTLB_MAX_INVS 32
5469    
5470     #define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
5471     diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
5472     index f99c993dd500..7e273e243a13 100644
5473     --- a/include/linux/mmzone.h
5474     +++ b/include/linux/mmzone.h
5475     @@ -779,7 +779,7 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
5476     #endif
5477     }
5478    
5479     -extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru);
5480     +extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);
5481    
5482     #ifdef CONFIG_HAVE_MEMORY_PRESENT
5483     void memory_present(int nid, unsigned long start, unsigned long end);
5484     diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
5485     index 5ee7aab95eb8..fd0e53219f93 100644
5486     --- a/include/rdma/ib_sa.h
5487     +++ b/include/rdma/ib_sa.h
5488     @@ -153,12 +153,12 @@ struct ib_sa_path_rec {
5489     union ib_gid sgid;
5490     __be16 dlid;
5491     __be16 slid;
5492     - int raw_traffic;
5493     + u8 raw_traffic;
5494     /* reserved */
5495     __be32 flow_label;
5496     u8 hop_limit;
5497     u8 traffic_class;
5498     - int reversible;
5499     + u8 reversible;
5500     u8 numb_path;
5501     __be16 pkey;
5502     __be16 qos_class;
5503     @@ -220,7 +220,7 @@ struct ib_sa_mcmember_rec {
5504     u8 hop_limit;
5505     u8 scope;
5506     u8 join_state;
5507     - int proxy_join;
5508     + u8 proxy_join;
5509     };
5510    
5511     /* Service Record Component Mask Sec 15.2.5.14 Ver 1.1 */
5512     diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
5513     index 8a9563144890..b9ec4939b80c 100644
5514     --- a/include/scsi/scsi_device.h
5515     +++ b/include/scsi/scsi_device.h
5516     @@ -315,6 +315,7 @@ extern void scsi_remove_device(struct scsi_device *);
5517     extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh);
5518     void scsi_attach_vpd(struct scsi_device *sdev);
5519    
5520     +extern struct scsi_device *scsi_device_from_queue(struct request_queue *q);
5521     extern int scsi_device_get(struct scsi_device *);
5522     extern void scsi_device_put(struct scsi_device *);
5523     extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *,
5524     diff --git a/include/soc/at91/at91sam9_ddrsdr.h b/include/soc/at91/at91sam9_ddrsdr.h
5525     index dc10c52e0e91..393362bdb860 100644
5526     --- a/include/soc/at91/at91sam9_ddrsdr.h
5527     +++ b/include/soc/at91/at91sam9_ddrsdr.h
5528     @@ -81,6 +81,7 @@
5529     #define AT91_DDRSDRC_LPCB_POWER_DOWN 2
5530     #define AT91_DDRSDRC_LPCB_DEEP_POWER_DOWN 3
5531     #define AT91_DDRSDRC_CLKFR (1 << 2) /* Clock Frozen */
5532     +#define AT91_DDRSDRC_LPDDR2_PWOFF (1 << 3) /* LPDDR Power Off */
5533     #define AT91_DDRSDRC_PASR (7 << 4) /* Partial Array Self Refresh */
5534     #define AT91_DDRSDRC_TCSR (3 << 8) /* Temperature Compensated Self Refresh */
5535     #define AT91_DDRSDRC_DS (3 << 10) /* Drive Strength */
5536     @@ -96,7 +97,9 @@
5537     #define AT91_DDRSDRC_MD_SDR 0
5538     #define AT91_DDRSDRC_MD_LOW_POWER_SDR 1
5539     #define AT91_DDRSDRC_MD_LOW_POWER_DDR 3
5540     +#define AT91_DDRSDRC_MD_LPDDR3 5
5541     #define AT91_DDRSDRC_MD_DDR2 6 /* [SAM9 Only] */
5542     +#define AT91_DDRSDRC_MD_LPDDR2 7
5543     #define AT91_DDRSDRC_DBW (1 << 4) /* Data Bus Width */
5544     #define AT91_DDRSDRC_DBW_32BITS (0 << 4)
5545     #define AT91_DDRSDRC_DBW_16BITS (1 << 4)
5546     diff --git a/ipc/shm.c b/ipc/shm.c
5547     index dbac8860c721..e2072ae4f90e 100644
5548     --- a/ipc/shm.c
5549     +++ b/ipc/shm.c
5550     @@ -1085,8 +1085,8 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
5551     * "raddr" thing points to kernel space, and there has to be a wrapper around
5552     * this.
5553     */
5554     -long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
5555     - unsigned long shmlba)
5556     +long do_shmat(int shmid, char __user *shmaddr, int shmflg,
5557     + ulong *raddr, unsigned long shmlba)
5558     {
5559     struct shmid_kernel *shp;
5560     unsigned long addr;
5561     @@ -1107,8 +1107,13 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
5562     goto out;
5563     else if ((addr = (ulong)shmaddr)) {
5564     if (addr & (shmlba - 1)) {
5565     - if (shmflg & SHM_RND)
5566     - addr &= ~(shmlba - 1); /* round down */
5567     + /*
5568     + * Round down to the nearest multiple of shmlba.
5569     + * For sane do_mmap_pgoff() parameters, avoid
5570     + * round downs that trigger nil-page and MAP_FIXED.
5571     + */
5572     + if ((shmflg & SHM_RND) && addr >= shmlba)
5573     + addr &= ~(shmlba - 1);
5574     else
5575     #ifndef __ARCH_FORCE_SHMLBA
5576     if (addr & ~PAGE_MASK)
5577     diff --git a/kernel/membarrier.c b/kernel/membarrier.c
5578     index 536c727a56e9..9f9284f37f8d 100644
5579     --- a/kernel/membarrier.c
5580     +++ b/kernel/membarrier.c
5581     @@ -16,6 +16,7 @@
5582    
5583     #include <linux/syscalls.h>
5584     #include <linux/membarrier.h>
5585     +#include <linux/tick.h>
5586    
5587     /*
5588     * Bitmask made from a "or" of all commands within enum membarrier_cmd,
5589     @@ -51,6 +52,9 @@
5590     */
5591     SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
5592     {
5593     + /* MEMBARRIER_CMD_SHARED is not compatible with nohz_full. */
5594     + if (tick_nohz_full_enabled())
5595     + return -ENOSYS;
5596     if (unlikely(flags))
5597     return -EINVAL;
5598     switch (cmd) {
5599     diff --git a/kernel/memremap.c b/kernel/memremap.c
5600     index 9ecedc28b928..06123234f118 100644
5601     --- a/kernel/memremap.c
5602     +++ b/kernel/memremap.c
5603     @@ -246,9 +246,13 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
5604     /* pages are dead and unused, undo the arch mapping */
5605     align_start = res->start & ~(SECTION_SIZE - 1);
5606     align_size = ALIGN(resource_size(res), SECTION_SIZE);
5607     +
5608     + lock_device_hotplug();
5609     mem_hotplug_begin();
5610     arch_remove_memory(align_start, align_size);
5611     mem_hotplug_done();
5612     + unlock_device_hotplug();
5613     +
5614     untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
5615     pgmap_radix_release(res);
5616     dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
5617     @@ -360,9 +364,11 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
5618     if (error)
5619     goto err_pfn_remap;
5620    
5621     + lock_device_hotplug();
5622     mem_hotplug_begin();
5623     error = arch_add_memory(nid, align_start, align_size, true);
5624     mem_hotplug_done();
5625     + unlock_device_hotplug();
5626     if (error)
5627     goto err_add_memory;
5628    
5629     diff --git a/kernel/signal.c b/kernel/signal.c
5630     index 75761acc77cf..0b1415720a15 100644
5631     --- a/kernel/signal.c
5632     +++ b/kernel/signal.c
5633     @@ -3226,10 +3226,17 @@ int compat_restore_altstack(const compat_stack_t __user *uss)
5634    
5635     int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
5636     {
5637     + int err;
5638     struct task_struct *t = current;
5639     - return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
5640     - __put_user(sas_ss_flags(sp), &uss->ss_flags) |
5641     + err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
5642     + &uss->ss_sp) |
5643     + __put_user(t->sas_ss_flags, &uss->ss_flags) |
5644     __put_user(t->sas_ss_size, &uss->ss_size);
5645     + if (err)
5646     + return err;
5647     + if (t->sas_ss_flags & SS_AUTODISARM)
5648     + sas_ss_reset(t);
5649     + return 0;
5650     }
5651     #endif
5652    
5653     diff --git a/mm/filemap.c b/mm/filemap.c
5654     index d8d7df82c69a..edfb90e3830c 100644
5655     --- a/mm/filemap.c
5656     +++ b/mm/filemap.c
5657     @@ -910,9 +910,12 @@ void page_endio(struct page *page, bool is_write, int err)
5658     unlock_page(page);
5659     } else {
5660     if (err) {
5661     + struct address_space *mapping;
5662     +
5663     SetPageError(page);
5664     - if (page->mapping)
5665     - mapping_set_error(page->mapping, err);
5666     + mapping = page_mapping(page);
5667     + if (mapping)
5668     + mapping_set_error(mapping, err);
5669     }
5670     end_page_writeback(page);
5671     }
5672     diff --git a/mm/page_alloc.c b/mm/page_alloc.c
5673     index f4a02e240fb6..1460e6ad5e14 100644
5674     --- a/mm/page_alloc.c
5675     +++ b/mm/page_alloc.c
5676     @@ -2858,7 +2858,7 @@ bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
5677     #ifdef CONFIG_NUMA
5678     static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
5679     {
5680     - return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
5681     + return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
5682     RECLAIM_DISTANCE;
5683     }
5684     #else /* CONFIG_NUMA */
5685     diff --git a/mm/vmpressure.c b/mm/vmpressure.c
5686     index 149fdf6c5c56..6063581f705c 100644
5687     --- a/mm/vmpressure.c
5688     +++ b/mm/vmpressure.c
5689     @@ -112,9 +112,16 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
5690     unsigned long reclaimed)
5691     {
5692     unsigned long scale = scanned + reclaimed;
5693     - unsigned long pressure;
5694     + unsigned long pressure = 0;
5695    
5696     /*
5697     + * reclaimed can be greater than scanned in cases
5698     + * like THP, where the scanned is 1 and reclaimed
5699     + * could be 512
5700     + */
5701     + if (reclaimed >= scanned)
5702     + goto out;
5703     + /*
5704     * We calculate the ratio (in percents) of how many pages were
5705     * scanned vs. reclaimed in a given time frame (window). Note that
5706     * time is in VM reclaimer's "ticks", i.e. number of pages
5707     @@ -124,6 +131,7 @@ static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
5708     pressure = scale - (reclaimed * scale / scanned);
5709     pressure = pressure * 100 / scale;
5710    
5711     +out:
5712     pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure,
5713     scanned, reclaimed);
5714    
5715     diff --git a/mm/vmscan.c b/mm/vmscan.c
5716     index fa30010a5277..30a88b945a44 100644
5717     --- a/mm/vmscan.c
5718     +++ b/mm/vmscan.c
5719     @@ -234,22 +234,39 @@ bool pgdat_reclaimable(struct pglist_data *pgdat)
5720     pgdat_reclaimable_pages(pgdat) * 6;
5721     }
5722    
5723     -unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru)
5724     +/**
5725     + * lruvec_lru_size - Returns the number of pages on the given LRU list.
5726     + * @lruvec: lru vector
5727     + * @lru: lru to use
5728     + * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
5729     + */
5730     +unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
5731     {
5732     + unsigned long lru_size;
5733     + int zid;
5734     +
5735     if (!mem_cgroup_disabled())
5736     - return mem_cgroup_get_lru_size(lruvec, lru);
5737     + lru_size = mem_cgroup_get_lru_size(lruvec, lru);
5738     + else
5739     + lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
5740    
5741     - return node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
5742     -}
5743     + for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
5744     + struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
5745     + unsigned long size;
5746    
5747     -unsigned long lruvec_zone_lru_size(struct lruvec *lruvec, enum lru_list lru,
5748     - int zone_idx)
5749     -{
5750     - if (!mem_cgroup_disabled())
5751     - return mem_cgroup_get_zone_lru_size(lruvec, lru, zone_idx);
5752     + if (!managed_zone(zone))
5753     + continue;
5754     +
5755     + if (!mem_cgroup_disabled())
5756     + size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
5757     + else
5758     + size = zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zid],
5759     + NR_ZONE_LRU_BASE + lru);
5760     + lru_size -= min(size, lru_size);
5761     + }
5762     +
5763     + return lru_size;
5764    
5765     - return zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zone_idx],
5766     - NR_ZONE_LRU_BASE + lru);
5767     }
5768    
5769     /*
5770     @@ -2028,11 +2045,10 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
5771     struct scan_control *sc)
5772     {
5773     unsigned long inactive_ratio;
5774     - unsigned long inactive;
5775     - unsigned long active;
5776     + unsigned long inactive, active;
5777     + enum lru_list inactive_lru = file * LRU_FILE;
5778     + enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
5779     unsigned long gb;
5780     - struct pglist_data *pgdat = lruvec_pgdat(lruvec);
5781     - int zid;
5782    
5783     /*
5784     * If we don't have swap space, anonymous page deactivation
5785     @@ -2041,27 +2057,8 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
5786     if (!file && !total_swap_pages)
5787     return false;
5788    
5789     - inactive = lruvec_lru_size(lruvec, file * LRU_FILE);
5790     - active = lruvec_lru_size(lruvec, file * LRU_FILE + LRU_ACTIVE);
5791     -
5792     - /*
5793     - * For zone-constrained allocations, it is necessary to check if
5794     - * deactivations are required for lowmem to be reclaimed. This
5795     - * calculates the inactive/active pages available in eligible zones.
5796     - */
5797     - for (zid = sc->reclaim_idx + 1; zid < MAX_NR_ZONES; zid++) {
5798     - struct zone *zone = &pgdat->node_zones[zid];
5799     - unsigned long inactive_zone, active_zone;
5800     -
5801     - if (!managed_zone(zone))
5802     - continue;
5803     -
5804     - inactive_zone = lruvec_zone_lru_size(lruvec, file * LRU_FILE, zid);
5805     - active_zone = lruvec_zone_lru_size(lruvec, (file * LRU_FILE) + LRU_ACTIVE, zid);
5806     -
5807     - inactive -= min(inactive, inactive_zone);
5808     - active -= min(active, active_zone);
5809     - }
5810     + inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx);
5811     + active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
5812    
5813     gb = (inactive + active) >> (30 - PAGE_SHIFT);
5814     if (gb)
5815     @@ -2208,7 +2205,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
5816     * system is under heavy pressure.
5817     */
5818     if (!inactive_list_is_low(lruvec, true, sc) &&
5819     - lruvec_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority) {
5820     + lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, sc->reclaim_idx) >> sc->priority) {
5821     scan_balance = SCAN_FILE;
5822     goto out;
5823     }
5824     @@ -2234,10 +2231,10 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
5825     * anon in [0], file in [1]
5826     */
5827    
5828     - anon = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON) +
5829     - lruvec_lru_size(lruvec, LRU_INACTIVE_ANON);
5830     - file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE) +
5831     - lruvec_lru_size(lruvec, LRU_INACTIVE_FILE);
5832     + anon = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
5833     + lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES);
5834     + file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) +
5835     + lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES);
5836    
5837     spin_lock_irq(&pgdat->lru_lock);
5838     if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
5839     @@ -2275,7 +2272,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
5840     unsigned long size;
5841     unsigned long scan;
5842    
5843     - size = lruvec_lru_size(lruvec, lru);
5844     + size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
5845     scan = size >> sc->priority;
5846    
5847     if (!scan && pass && force_scan)
5848     diff --git a/mm/workingset.c b/mm/workingset.c
5849     index fb1f9183d89a..33f6f4db32fd 100644
5850     --- a/mm/workingset.c
5851     +++ b/mm/workingset.c
5852     @@ -266,7 +266,7 @@ bool workingset_refault(void *shadow)
5853     }
5854     lruvec = mem_cgroup_lruvec(pgdat, memcg);
5855     refault = atomic_long_read(&lruvec->inactive_age);
5856     - active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE);
5857     + active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES);
5858     rcu_read_unlock();
5859    
5860     /*
5861     diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
5862     index e6ae15bc41b7..0ffeb60cfe67 100644
5863     --- a/net/ceph/osd_client.c
5864     +++ b/net/ceph/osd_client.c
5865     @@ -672,7 +672,8 @@ void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
5866     BUG_ON(length > previous);
5867    
5868     op->extent.length = length;
5869     - op->indata_len -= previous - length;
5870     + if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
5871     + op->indata_len -= previous - length;
5872     }
5873     EXPORT_SYMBOL(osd_req_op_extent_update);
5874    
5875     diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
5876     index 0f87e5d21be7..6bd150882ba4 100644
5877     --- a/net/netfilter/nf_conntrack_core.c
5878     +++ b/net/netfilter/nf_conntrack_core.c
5879     @@ -85,11 +85,11 @@ static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
5880     static __read_mostly bool nf_conntrack_locks_all;
5881    
5882     /* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
5883     -#define GC_MAX_BUCKETS_DIV 64u
5884     -/* upper bound of scan intervals */
5885     -#define GC_INTERVAL_MAX (2 * HZ)
5886     -/* maximum conntracks to evict per gc run */
5887     -#define GC_MAX_EVICTS 256u
5888     +#define GC_MAX_BUCKETS_DIV 128u
5889     +/* upper bound of full table scan */
5890     +#define GC_MAX_SCAN_JIFFIES (16u * HZ)
5891     +/* desired ratio of entries found to be expired */
5892     +#define GC_EVICT_RATIO 50u
5893    
5894     static struct conntrack_gc_work conntrack_gc_work;
5895    
5896     @@ -938,6 +938,7 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
5897    
5898     static void gc_worker(struct work_struct *work)
5899     {
5900     + unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
5901     unsigned int i, goal, buckets = 0, expired_count = 0;
5902     struct conntrack_gc_work *gc_work;
5903     unsigned int ratio, scanned = 0;
5904     @@ -979,8 +980,7 @@ static void gc_worker(struct work_struct *work)
5905     */
5906     rcu_read_unlock();
5907     cond_resched_rcu_qs();
5908     - } while (++buckets < goal &&
5909     - expired_count < GC_MAX_EVICTS);
5910     + } while (++buckets < goal);
5911    
5912     if (gc_work->exiting)
5913     return;
5914     @@ -997,27 +997,25 @@ static void gc_worker(struct work_struct *work)
5915     * 1. Minimize time until we notice a stale entry
5916     * 2. Maximize scan intervals to not waste cycles
5917     *
5918     - * Normally, expired_count will be 0, this increases the next_run time
5919     - * to priorize 2) above.
5920     + * Normally, expire ratio will be close to 0.
5921     *
5922     - * As soon as a timed-out entry is found, move towards 1) and increase
5923     - * the scan frequency.
5924     - * In case we have lots of evictions next scan is done immediately.
5925     + * As soon as a sizeable fraction of the entries have expired
5926     + * increase scan frequency.
5927     */
5928     ratio = scanned ? expired_count * 100 / scanned : 0;
5929     - if (ratio >= 90 || expired_count == GC_MAX_EVICTS) {
5930     - gc_work->next_gc_run = 0;
5931     - next_run = 0;
5932     - } else if (expired_count) {
5933     - gc_work->next_gc_run /= 2U;
5934     - next_run = msecs_to_jiffies(1);
5935     + if (ratio > GC_EVICT_RATIO) {
5936     + gc_work->next_gc_run = min_interval;
5937     } else {
5938     - if (gc_work->next_gc_run < GC_INTERVAL_MAX)
5939     - gc_work->next_gc_run += msecs_to_jiffies(1);
5940     + unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
5941    
5942     - next_run = gc_work->next_gc_run;
5943     + BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
5944     +
5945     + gc_work->next_gc_run += min_interval;
5946     + if (gc_work->next_gc_run > max)
5947     + gc_work->next_gc_run = max;
5948     }
5949    
5950     + next_run = gc_work->next_gc_run;
5951     gc_work->last_bucket = i;
5952     queue_delayed_work(system_long_wq, &gc_work->dwork, next_run);
5953     }
5954     @@ -1025,7 +1023,7 @@ static void gc_worker(struct work_struct *work)
5955     static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
5956     {
5957     INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
5958     - gc_work->next_gc_run = GC_INTERVAL_MAX;
5959     + gc_work->next_gc_run = HZ;
5960     gc_work->exiting = false;
5961     }
5962    
5963     @@ -1918,7 +1916,7 @@ int nf_conntrack_init_start(void)
5964     nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
5965    
5966     conntrack_gc_work_init(&conntrack_gc_work);
5967     - queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX);
5968     + queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ);
5969    
5970     return 0;
5971    
5972     diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
5973     index d987c2d3dd6e..f57c9f0ab8f9 100644
5974     --- a/net/sunrpc/xprtrdma/rpc_rdma.c
5975     +++ b/net/sunrpc/xprtrdma/rpc_rdma.c
5976     @@ -125,14 +125,34 @@ void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
5977     /* The client can send a request inline as long as the RPCRDMA header
5978     * plus the RPC call fit under the transport's inline limit. If the
5979     * combined call message size exceeds that limit, the client must use
5980     - * the read chunk list for this operation.
5981     + * a Read chunk for this operation.
5982     + *
5983     + * A Read chunk is also required if sending the RPC call inline would
5984     + * exceed this device's max_sge limit.
5985     */
5986     static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
5987     struct rpc_rqst *rqst)
5988     {
5989     - struct rpcrdma_ia *ia = &r_xprt->rx_ia;
5990     + struct xdr_buf *xdr = &rqst->rq_snd_buf;
5991     + unsigned int count, remaining, offset;
5992     +
5993     + if (xdr->len > r_xprt->rx_ia.ri_max_inline_write)
5994     + return false;
5995     +
5996     + if (xdr->page_len) {
5997     + remaining = xdr->page_len;
5998     + offset = xdr->page_base & ~PAGE_MASK;
5999     + count = 0;
6000     + while (remaining) {
6001     + remaining -= min_t(unsigned int,
6002     + PAGE_SIZE - offset, remaining);
6003     + offset = 0;
6004     + if (++count > r_xprt->rx_ia.ri_max_send_sges)
6005     + return false;
6006     + }
6007     + }
6008    
6009     - return rqst->rq_snd_buf.len <= ia->ri_max_inline_write;
6010     + return true;
6011     }
6012    
6013     /* The client can't know how large the actual reply will be. Thus it
6014     @@ -186,9 +206,9 @@ rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg, int n)
6015     */
6016    
6017     static int
6018     -rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
6019     - enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg,
6020     - bool reminv_expected)
6021     +rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
6022     + unsigned int pos, enum rpcrdma_chunktype type,
6023     + struct rpcrdma_mr_seg *seg)
6024     {
6025     int len, n, p, page_base;
6026     struct page **ppages;
6027     @@ -226,22 +246,21 @@ rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
6028     if (len && n == RPCRDMA_MAX_SEGS)
6029     goto out_overflow;
6030    
6031     - /* When encoding the read list, the tail is always sent inline */
6032     - if (type == rpcrdma_readch)
6033     + /* When encoding a Read chunk, the tail iovec contains an
6034     + * XDR pad and may be omitted.
6035     + */
6036     + if (type == rpcrdma_readch && r_xprt->rx_ia.ri_implicit_roundup)
6037     return n;
6038    
6039     - /* When encoding the Write list, some servers need to see an extra
6040     - * segment for odd-length Write chunks. The upper layer provides
6041     - * space in the tail iovec for this purpose.
6042     + /* When encoding a Write chunk, some servers need to see an
6043     + * extra segment for non-XDR-aligned Write chunks. The upper
6044     + * layer provides space in the tail iovec that may be used
6045     + * for this purpose.
6046     */
6047     - if (type == rpcrdma_writech && reminv_expected)
6048     + if (type == rpcrdma_writech && r_xprt->rx_ia.ri_implicit_roundup)
6049     return n;
6050    
6051     if (xdrbuf->tail[0].iov_len) {
6052     - /* the rpcrdma protocol allows us to omit any trailing
6053     - * xdr pad bytes, saving the server an RDMA operation. */
6054     - if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
6055     - return n;
6056     n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n);
6057     if (n == RPCRDMA_MAX_SEGS)
6058     goto out_overflow;
6059     @@ -293,7 +312,8 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
6060     if (rtype == rpcrdma_areadch)
6061     pos = 0;
6062     seg = req->rl_segments;
6063     - nsegs = rpcrdma_convert_iovs(&rqst->rq_snd_buf, pos, rtype, seg, false);
6064     + nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
6065     + rtype, seg);
6066     if (nsegs < 0)
6067     return ERR_PTR(nsegs);
6068    
6069     @@ -355,10 +375,9 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
6070     }
6071    
6072     seg = req->rl_segments;
6073     - nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf,
6074     + nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
6075     rqst->rq_rcv_buf.head[0].iov_len,
6076     - wtype, seg,
6077     - r_xprt->rx_ia.ri_reminv_expected);
6078     + wtype, seg);
6079     if (nsegs < 0)
6080     return ERR_PTR(nsegs);
6081    
6082     @@ -423,8 +442,7 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
6083     }
6084    
6085     seg = req->rl_segments;
6086     - nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf, 0, wtype, seg,
6087     - r_xprt->rx_ia.ri_reminv_expected);
6088     + nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
6089     if (nsegs < 0)
6090     return ERR_PTR(nsegs);
6091    
6092     diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
6093     index ed5e285fd2ea..fa324fe73946 100644
6094     --- a/net/sunrpc/xprtrdma/transport.c
6095     +++ b/net/sunrpc/xprtrdma/transport.c
6096     @@ -67,7 +67,7 @@ unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
6097     static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
6098     static unsigned int xprt_rdma_inline_write_padding;
6099     static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR;
6100     - int xprt_rdma_pad_optimize = 1;
6101     + int xprt_rdma_pad_optimize = 0;
6102    
6103     #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
6104    
6105     diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
6106     index 8da7f6a4dfc3..e2c37061edbe 100644
6107     --- a/net/sunrpc/xprtrdma/verbs.c
6108     +++ b/net/sunrpc/xprtrdma/verbs.c
6109     @@ -208,6 +208,7 @@ rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
6110    
6111     /* Default settings for RPC-over-RDMA Version One */
6112     r_xprt->rx_ia.ri_reminv_expected = false;
6113     + r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
6114     rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
6115     wsize = RPCRDMA_V1_DEF_INLINE_SIZE;
6116    
6117     @@ -215,6 +216,7 @@ rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
6118     pmsg->cp_magic == rpcrdma_cmp_magic &&
6119     pmsg->cp_version == RPCRDMA_CMP_VERSION) {
6120     r_xprt->rx_ia.ri_reminv_expected = true;
6121     + r_xprt->rx_ia.ri_implicit_roundup = true;
6122     rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
6123     wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
6124     }
6125     @@ -477,18 +479,19 @@ rpcrdma_ia_close(struct rpcrdma_ia *ia)
6126     */
6127     int
6128     rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
6129     - struct rpcrdma_create_data_internal *cdata)
6130     + struct rpcrdma_create_data_internal *cdata)
6131     {
6132     struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
6133     + unsigned int max_qp_wr, max_sge;
6134     struct ib_cq *sendcq, *recvcq;
6135     - unsigned int max_qp_wr;
6136     int rc;
6137    
6138     - if (ia->ri_device->attrs.max_sge < RPCRDMA_MAX_SEND_SGES) {
6139     - dprintk("RPC: %s: insufficient sge's available\n",
6140     - __func__);
6141     + max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES);
6142     + if (max_sge < RPCRDMA_MIN_SEND_SGES) {
6143     + pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
6144     return -ENOMEM;
6145     }
6146     + ia->ri_max_send_sges = max_sge - RPCRDMA_MIN_SEND_SGES;
6147    
6148     if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) {
6149     dprintk("RPC: %s: insufficient wqe's available\n",
6150     @@ -513,7 +516,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
6151     ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
6152     ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
6153     ep->rep_attr.cap.max_recv_wr += 1; /* drain cqe */
6154     - ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_SEND_SGES;
6155     + ep->rep_attr.cap.max_send_sge = max_sge;
6156     ep->rep_attr.cap.max_recv_sge = 1;
6157     ep->rep_attr.cap.max_inline_data = 0;
6158     ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
6159     diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
6160     index f6ae1b22da47..48989d5b2883 100644
6161     --- a/net/sunrpc/xprtrdma/xprt_rdma.h
6162     +++ b/net/sunrpc/xprtrdma/xprt_rdma.h
6163     @@ -74,7 +74,9 @@ struct rpcrdma_ia {
6164     unsigned int ri_max_frmr_depth;
6165     unsigned int ri_max_inline_write;
6166     unsigned int ri_max_inline_read;
6167     + unsigned int ri_max_send_sges;
6168     bool ri_reminv_expected;
6169     + bool ri_implicit_roundup;
6170     struct ib_qp_attr ri_qp_attr;
6171     struct ib_qp_init_attr ri_qp_init_attr;
6172     };
6173     @@ -309,6 +311,7 @@ struct rpcrdma_mr_seg { /* chunk descriptors */
6174     * - xdr_buf tail iovec
6175     */
6176     enum {
6177     + RPCRDMA_MIN_SEND_SGES = 3,
6178     RPCRDMA_MAX_SEND_PAGES = PAGE_SIZE + RPCRDMA_MAX_INLINE - 1,
6179     RPCRDMA_MAX_PAGE_SGES = (RPCRDMA_MAX_SEND_PAGES >> PAGE_SHIFT) + 1,
6180     RPCRDMA_MAX_SEND_SGES = 1 + 1 + RPCRDMA_MAX_PAGE_SGES + 1,
6181     diff --git a/samples/seccomp/bpf-helper.h b/samples/seccomp/bpf-helper.h
6182     index 38ee70f3cd5b..1d8de9edd858 100644
6183     --- a/samples/seccomp/bpf-helper.h
6184     +++ b/samples/seccomp/bpf-helper.h
6185     @@ -138,7 +138,7 @@ union arg64 {
6186     #define ARG_32(idx) \
6187     BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx))
6188    
6189     -/* Loads hi into A and lo in X */
6190     +/* Loads lo into M[0] and hi into M[1] and A */
6191     #define ARG_64(idx) \
6192     BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \
6193     BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \
6194     @@ -153,88 +153,107 @@ union arg64 {
6195     BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 1, 0), \
6196     jt
6197    
6198     -/* Checks the lo, then swaps to check the hi. A=lo,X=hi */
6199     +#define JA32(value, jt) \
6200     + BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
6201     + jt
6202     +
6203     +#define JGE32(value, jt) \
6204     + BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
6205     + jt
6206     +
6207     +#define JGT32(value, jt) \
6208     + BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
6209     + jt
6210     +
6211     +#define JLE32(value, jt) \
6212     + BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
6213     + jt
6214     +
6215     +#define JLT32(value, jt) \
6216     + BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
6217     + jt
6218     +
6219     +/*
6220     + * All the JXX64 checks assume lo is saved in M[0] and hi is saved in both
6221     + * A and M[1]. This invariant is kept by restoring A if necessary.
6222     + */
6223     #define JEQ64(lo, hi, jt) \
6224     + /* if (hi != arg.hi) goto NOMATCH; */ \
6225     BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
6226     BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
6227     + /* if (lo != arg.lo) goto NOMATCH; */ \
6228     BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 0, 2), \
6229     - BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
6230     + BPF_STMT(BPF_LD+BPF_MEM, 1), \
6231     jt, \
6232     - BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
6233     + BPF_STMT(BPF_LD+BPF_MEM, 1)
6234    
6235     #define JNE64(lo, hi, jt) \
6236     - BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 5, 0), \
6237     - BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
6238     + /* if (hi != arg.hi) goto MATCH; */ \
6239     + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
6240     + BPF_STMT(BPF_LD+BPF_MEM, 0), \
6241     + /* if (lo != arg.lo) goto MATCH; */ \
6242     BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 2, 0), \
6243     - BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
6244     + BPF_STMT(BPF_LD+BPF_MEM, 1), \
6245     jt, \
6246     - BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
6247     -
6248     -#define JA32(value, jt) \
6249     - BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
6250     - jt
6251     + BPF_STMT(BPF_LD+BPF_MEM, 1)
6252    
6253     #define JA64(lo, hi, jt) \
6254     + /* if (hi & arg.hi) goto MATCH; */ \
6255     BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (hi), 3, 0), \
6256     - BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
6257     + BPF_STMT(BPF_LD+BPF_MEM, 0), \
6258     + /* if (lo & arg.lo) goto MATCH; */ \
6259     BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (lo), 0, 2), \
6260     - BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
6261     + BPF_STMT(BPF_LD+BPF_MEM, 1), \
6262     jt, \
6263     - BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
6264     + BPF_STMT(BPF_LD+BPF_MEM, 1)
6265    
6266     -#define JGE32(value, jt) \
6267     - BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
6268     - jt
6269     -
6270     -#define JLT32(value, jt) \
6271     - BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
6272     - jt
6273     -
6274     -/* Shortcut checking if hi > arg.hi. */
6275     #define JGE64(lo, hi, jt) \
6276     + /* if (hi > arg.hi) goto MATCH; */ \
6277     BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
6278     + /* if (hi != arg.hi) goto NOMATCH; */ \
6279     BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
6280     - BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
6281     + BPF_STMT(BPF_LD+BPF_MEM, 0), \
6282     + /* if (lo >= arg.lo) goto MATCH; */ \
6283     BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 0, 2), \
6284     - BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
6285     - jt, \
6286     - BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
6287     -
6288     -#define JLT64(lo, hi, jt) \
6289     - BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
6290     - BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
6291     - BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
6292     - BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
6293     - BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
6294     + BPF_STMT(BPF_LD+BPF_MEM, 1), \
6295     jt, \
6296     - BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
6297     + BPF_STMT(BPF_LD+BPF_MEM, 1)
6298    
6299     -#define JGT32(value, jt) \
6300     - BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
6301     - jt
6302     -
6303     -#define JLE32(value, jt) \
6304     - BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
6305     - jt
6306     -
6307     -/* Check hi > args.hi first, then do the GE checking */
6308     #define JGT64(lo, hi, jt) \
6309     + /* if (hi > arg.hi) goto MATCH; */ \
6310     BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
6311     + /* if (hi != arg.hi) goto NOMATCH; */ \
6312     BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
6313     - BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
6314     + BPF_STMT(BPF_LD+BPF_MEM, 0), \
6315     + /* if (lo > arg.lo) goto MATCH; */ \
6316     BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 0, 2), \
6317     - BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
6318     + BPF_STMT(BPF_LD+BPF_MEM, 1), \
6319     jt, \
6320     - BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
6321     + BPF_STMT(BPF_LD+BPF_MEM, 1)
6322    
6323     #define JLE64(lo, hi, jt) \
6324     - BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 6, 0), \
6325     - BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
6326     - BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
6327     + /* if (hi < arg.hi) goto MATCH; */ \
6328     + BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
6329     + /* if (hi != arg.hi) goto NOMATCH; */ \
6330     + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
6331     + BPF_STMT(BPF_LD+BPF_MEM, 0), \
6332     + /* if (lo <= arg.lo) goto MATCH; */ \
6333     BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
6334     - BPF_STMT(BPF_LD+BPF_MEM, 1), /* passed: swap hi back in */ \
6335     + BPF_STMT(BPF_LD+BPF_MEM, 1), \
6336     + jt, \
6337     + BPF_STMT(BPF_LD+BPF_MEM, 1)
6338     +
6339     +#define JLT64(lo, hi, jt) \
6340     + /* if (hi < arg.hi) goto MATCH; */ \
6341     + BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
6342     + /* if (hi != arg.hi) goto NOMATCH; */ \
6343     + BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
6344     + BPF_STMT(BPF_LD+BPF_MEM, 0), \
6345     + /* if (lo < arg.lo) goto MATCH; */ \
6346     + BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 2, 0), \
6347     + BPF_STMT(BPF_LD+BPF_MEM, 1), \
6348     jt, \
6349     - BPF_STMT(BPF_LD+BPF_MEM, 1) /* failed: swap hi back in */
6350     + BPF_STMT(BPF_LD+BPF_MEM, 1)
6351    
6352     #define LOAD_SYSCALL_NR \
6353     BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \
6354     diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
6355     index db25f54a04fe..df7834aa1b8f 100644
6356     --- a/security/integrity/ima/ima.h
6357     +++ b/security/integrity/ima/ima.h
6358     @@ -173,7 +173,7 @@ int ima_store_template(struct ima_template_entry *entry, int violation,
6359     struct inode *inode,
6360     const unsigned char *filename, int pcr);
6361     void ima_free_template_entry(struct ima_template_entry *entry);
6362     -const char *ima_d_path(const struct path *path, char **pathbuf);
6363     +const char *ima_d_path(const struct path *path, char **pathbuf, char *filename);
6364    
6365     /* IMA policy related functions */
6366     int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
6367     diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
6368     index 9df26a2b75ba..d01a52f8f708 100644
6369     --- a/security/integrity/ima/ima_api.c
6370     +++ b/security/integrity/ima/ima_api.c
6371     @@ -318,7 +318,17 @@ void ima_audit_measurement(struct integrity_iint_cache *iint,
6372     iint->flags |= IMA_AUDITED;
6373     }
6374    
6375     -const char *ima_d_path(const struct path *path, char **pathbuf)
6376     +/*
6377     + * ima_d_path - return a pointer to the full pathname
6378     + *
6379     + * Attempt to return a pointer to the full pathname for use in the
6380     + * IMA measurement list, IMA audit records, and auditing logs.
6381     + *
6382     + * On failure, return a pointer to a copy of the filename, not dname.
6383     + * Returning a pointer to dname, could result in using the pointer
6384     + * after the memory has been freed.
6385     + */
6386     +const char *ima_d_path(const struct path *path, char **pathbuf, char *namebuf)
6387     {
6388     char *pathname = NULL;
6389    
6390     @@ -331,5 +341,11 @@ const char *ima_d_path(const struct path *path, char **pathbuf)
6391     pathname = NULL;
6392     }
6393     }
6394     - return pathname ?: (const char *)path->dentry->d_name.name;
6395     +
6396     + if (!pathname) {
6397     + strlcpy(namebuf, path->dentry->d_name.name, NAME_MAX);
6398     + pathname = namebuf;
6399     + }
6400     +
6401     + return pathname;
6402     }
6403     diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
6404     index 423d111b3b94..0e8762945e79 100644
6405     --- a/security/integrity/ima/ima_main.c
6406     +++ b/security/integrity/ima/ima_main.c
6407     @@ -83,6 +83,7 @@ static void ima_rdwr_violation_check(struct file *file,
6408     const char **pathname)
6409     {
6410     struct inode *inode = file_inode(file);
6411     + char filename[NAME_MAX];
6412     fmode_t mode = file->f_mode;
6413     bool send_tomtou = false, send_writers = false;
6414    
6415     @@ -102,7 +103,7 @@ static void ima_rdwr_violation_check(struct file *file,
6416     if (!send_tomtou && !send_writers)
6417     return;
6418    
6419     - *pathname = ima_d_path(&file->f_path, pathbuf);
6420     + *pathname = ima_d_path(&file->f_path, pathbuf, filename);
6421    
6422     if (send_tomtou)
6423     ima_add_violation(file, *pathname, iint,
6424     @@ -161,6 +162,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
6425     struct integrity_iint_cache *iint = NULL;
6426     struct ima_template_desc *template_desc;
6427     char *pathbuf = NULL;
6428     + char filename[NAME_MAX];
6429     const char *pathname = NULL;
6430     int rc = -ENOMEM, action, must_appraise;
6431     int pcr = CONFIG_IMA_MEASURE_PCR_IDX;
6432     @@ -239,8 +241,8 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
6433     goto out_digsig;
6434     }
6435    
6436     - if (!pathname) /* ima_rdwr_violation possibly pre-fetched */
6437     - pathname = ima_d_path(&file->f_path, &pathbuf);
6438     + if (!pathbuf) /* ima_rdwr_violation possibly pre-fetched */
6439     + pathname = ima_d_path(&file->f_path, &pathbuf, filename);
6440    
6441     if (action & IMA_MEASURE)
6442     ima_store_measurement(iint, file, pathname,
6443     diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
6444     index 1d5acbe0c08b..86240d02b530 100644
6445     --- a/sound/core/seq/seq_fifo.c
6446     +++ b/sound/core/seq/seq_fifo.c
6447     @@ -135,6 +135,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
6448     f->tail = cell;
6449     if (f->head == NULL)
6450     f->head = cell;
6451     + cell->next = NULL;
6452     f->cells++;
6453     spin_unlock_irqrestore(&f->lock, flags);
6454    
6455     @@ -214,6 +215,8 @@ void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f,
6456     spin_lock_irqsave(&f->lock, flags);
6457     cell->next = f->head;
6458     f->head = cell;
6459     + if (!f->tail)
6460     + f->tail = cell;
6461     f->cells++;
6462     spin_unlock_irqrestore(&f->lock, flags);
6463     }
6464     diff --git a/sound/core/timer.c b/sound/core/timer.c
6465     index fc144f43faa6..ad153149b231 100644
6466     --- a/sound/core/timer.c
6467     +++ b/sound/core/timer.c
6468     @@ -1702,9 +1702,21 @@ static int snd_timer_user_params(struct file *file,
6469     return -EBADFD;
6470     if (copy_from_user(&params, _params, sizeof(params)))
6471     return -EFAULT;
6472     - if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) {
6473     - err = -EINVAL;
6474     - goto _end;
6475     + if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE)) {
6476     + u64 resolution;
6477     +
6478     + if (params.ticks < 1) {
6479     + err = -EINVAL;
6480     + goto _end;
6481     + }
6482     +
6483     + /* Don't allow resolution less than 1ms */
6484     + resolution = snd_timer_resolution(tu->timeri);
6485     + resolution *= params.ticks;
6486     + if (resolution < 1000000) {
6487     + err = -EINVAL;
6488     + goto _end;
6489     + }
6490     }
6491     if (params.queue_size > 0 &&
6492     (params.queue_size < 32 || params.queue_size > 1024)) {
6493     diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c
6494     index 9667cbfb0ca2..ab4cdab5cfa5 100644
6495     --- a/sound/pci/ctxfi/cthw20k1.c
6496     +++ b/sound/pci/ctxfi/cthw20k1.c
6497     @@ -27,12 +27,6 @@
6498     #include "cthw20k1.h"
6499     #include "ct20k1reg.h"
6500    
6501     -#if BITS_PER_LONG == 32
6502     -#define CT_XFI_DMA_MASK DMA_BIT_MASK(32) /* 32 bit PTE */
6503     -#else
6504     -#define CT_XFI_DMA_MASK DMA_BIT_MASK(64) /* 64 bit PTE */
6505     -#endif
6506     -
6507     struct hw20k1 {
6508     struct hw hw;
6509     spinlock_t reg_20k1_lock;
6510     @@ -1904,19 +1898,18 @@ static int hw_card_start(struct hw *hw)
6511     {
6512     int err;
6513     struct pci_dev *pci = hw->pci;
6514     + const unsigned int dma_bits = BITS_PER_LONG;
6515    
6516     err = pci_enable_device(pci);
6517     if (err < 0)
6518     return err;
6519    
6520     /* Set DMA transfer mask */
6521     - if (dma_set_mask(&pci->dev, CT_XFI_DMA_MASK) < 0 ||
6522     - dma_set_coherent_mask(&pci->dev, CT_XFI_DMA_MASK) < 0) {
6523     - dev_err(hw->card->dev,
6524     - "architecture does not support PCI busmaster DMA with mask 0x%llx\n",
6525     - CT_XFI_DMA_MASK);
6526     - err = -ENXIO;
6527     - goto error1;
6528     + if (dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
6529     + dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits));
6530     + } else {
6531     + dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
6532     + dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32));
6533     }
6534    
6535     if (!hw->io_base) {
6536     diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c
6537     index 6414ecf93efa..18ee7768b7c4 100644
6538     --- a/sound/pci/ctxfi/cthw20k2.c
6539     +++ b/sound/pci/ctxfi/cthw20k2.c
6540     @@ -26,12 +26,6 @@
6541     #include "cthw20k2.h"
6542     #include "ct20k2reg.h"
6543    
6544     -#if BITS_PER_LONG == 32
6545     -#define CT_XFI_DMA_MASK DMA_BIT_MASK(32) /* 32 bit PTE */
6546     -#else
6547     -#define CT_XFI_DMA_MASK DMA_BIT_MASK(64) /* 64 bit PTE */
6548     -#endif
6549     -
6550     struct hw20k2 {
6551     struct hw hw;
6552     /* for i2c */
6553     @@ -2029,19 +2023,18 @@ static int hw_card_start(struct hw *hw)
6554     int err = 0;
6555     struct pci_dev *pci = hw->pci;
6556     unsigned int gctl;
6557     + const unsigned int dma_bits = BITS_PER_LONG;
6558    
6559     err = pci_enable_device(pci);
6560     if (err < 0)
6561     return err;
6562    
6563     /* Set DMA transfer mask */
6564     - if (dma_set_mask(&pci->dev, CT_XFI_DMA_MASK) < 0 ||
6565     - dma_set_coherent_mask(&pci->dev, CT_XFI_DMA_MASK) < 0) {
6566     - dev_err(hw->card->dev,
6567     - "architecture does not support PCI busmaster DMA with mask 0x%llx\n",
6568     - CT_XFI_DMA_MASK);
6569     - err = -ENXIO;
6570     - goto error1;
6571     + if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
6572     + dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits));
6573     + } else {
6574     + dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
6575     + dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32));
6576     }
6577    
6578     if (!hw->io_base) {
6579     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
6580     index c64d986009a9..bc4462694aaf 100644
6581     --- a/sound/pci/hda/hda_intel.c
6582     +++ b/sound/pci/hda/hda_intel.c
6583     @@ -2197,9 +2197,9 @@ static const struct pci_device_id azx_ids[] = {
6584     .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
6585     /* Lewisburg */
6586     { PCI_DEVICE(0x8086, 0xa1f0),
6587     - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
6588     + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
6589     { PCI_DEVICE(0x8086, 0xa270),
6590     - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
6591     + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
6592     /* Lynx Point-LP */
6593     { PCI_DEVICE(0x8086, 0x9c20),
6594     .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
6595     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6596     index 758ac86a1d3a..0c62b1d8c11b 100644
6597     --- a/sound/pci/hda/patch_realtek.c
6598     +++ b/sound/pci/hda/patch_realtek.c
6599     @@ -5577,6 +5577,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6600     SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
6601     SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
6602     SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
6603     + SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
6604     SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6605     SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6606     SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
6607     @@ -5692,6 +5693,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6608     SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
6609     SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6610     SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6611     + SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6612     SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
6613     SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
6614     SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
6615     @@ -6065,6 +6067,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
6616     SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
6617     ALC298_STANDARD_PINS,
6618     {0x17, 0x90170150}),
6619     + SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_SPK_VOLUME,
6620     + {0x12, 0xb7a60140},
6621     + {0x13, 0xb7a60150},
6622     + {0x17, 0x90170110},
6623     + {0x1a, 0x03011020},
6624     + {0x21, 0x03211030}),
6625     {}
6626     };
6627    
6628     diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
6629     index ae58b493af45..ecf6236f3b5f 100644
6630     --- a/tools/perf/util/callchain.c
6631     +++ b/tools/perf/util/callchain.c
6632     @@ -437,7 +437,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
6633     }
6634     call->ip = cursor_node->ip;
6635     call->ms.sym = cursor_node->sym;
6636     - call->ms.map = cursor_node->map;
6637     + call->ms.map = map__get(cursor_node->map);
6638     list_add_tail(&call->list, &node->val);
6639    
6640     callchain_cursor_advance(cursor);
6641     @@ -462,6 +462,7 @@ add_child(struct callchain_node *parent,
6642    
6643     list_for_each_entry_safe(call, tmp, &new->val, list) {
6644     list_del(&call->list);
6645     + map__zput(call->ms.map);
6646     free(call);
6647     }
6648     free(new);
6649     @@ -730,6 +731,7 @@ merge_chain_branch(struct callchain_cursor *cursor,
6650     callchain_cursor_append(cursor, list->ip,
6651     list->ms.map, list->ms.sym);
6652     list_del(&list->list);
6653     + map__zput(list->ms.map);
6654     free(list);
6655     }
6656    
6657     @@ -778,7 +780,8 @@ int callchain_cursor_append(struct callchain_cursor *cursor,
6658     }
6659    
6660     node->ip = ip;
6661     - node->map = map;
6662     + map__zput(node->map);
6663     + node->map = map__get(map);
6664     node->sym = sym;
6665    
6666     cursor->nr++;
6667     @@ -945,11 +948,13 @@ static void free_callchain_node(struct callchain_node *node)
6668    
6669     list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
6670     list_del(&list->list);
6671     + map__zput(list->ms.map);
6672     free(list);
6673     }
6674    
6675     list_for_each_entry_safe(list, tmp, &node->val, list) {
6676     list_del(&list->list);
6677     + map__zput(list->ms.map);
6678     free(list);
6679     }
6680    
6681     @@ -1013,6 +1018,7 @@ int callchain_node__make_parent_list(struct callchain_node *node)
6682     goto out;
6683     *new = *chain;
6684     new->has_children = false;
6685     + map__get(new->ms.map);
6686     list_add_tail(&new->list, &head);
6687     }
6688     parent = parent->parent;
6689     @@ -1033,6 +1039,7 @@ int callchain_node__make_parent_list(struct callchain_node *node)
6690     out:
6691     list_for_each_entry_safe(chain, new, &head, list) {
6692     list_del(&chain->list);
6693     + map__zput(chain->ms.map);
6694     free(chain);
6695     }
6696     return -ENOMEM;
6697     diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
6698     index 47cfd1080975..b7cbabb3931f 100644
6699     --- a/tools/perf/util/callchain.h
6700     +++ b/tools/perf/util/callchain.h
6701     @@ -5,6 +5,7 @@
6702     #include <linux/list.h>
6703     #include <linux/rbtree.h>
6704     #include "event.h"
6705     +#include "map.h"
6706     #include "symbol.h"
6707    
6708     #define HELP_PAD "\t\t\t\t"
6709     @@ -174,8 +175,13 @@ int callchain_merge(struct callchain_cursor *cursor,
6710     */
6711     static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
6712     {
6713     + struct callchain_cursor_node *node;
6714     +
6715     cursor->nr = 0;
6716     cursor->last = &cursor->first;
6717     +
6718     + for (node = cursor->first; node != NULL; node = node->next)
6719     + map__zput(node->map);
6720     }
6721    
6722     int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
6723     diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
6724     index a69f027368ef..10849a079026 100644
6725     --- a/tools/perf/util/hist.c
6726     +++ b/tools/perf/util/hist.c
6727     @@ -1,6 +1,7 @@
6728     #include "util.h"
6729     #include "build-id.h"
6730     #include "hist.h"
6731     +#include "map.h"
6732     #include "session.h"
6733     #include "sort.h"
6734     #include "evlist.h"
6735     @@ -1019,6 +1020,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
6736     int max_stack_depth, void *arg)
6737     {
6738     int err, err2;
6739     + struct map *alm = NULL;
6740     +
6741     + if (al && al->map)
6742     + alm = map__get(al->map);
6743    
6744     err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
6745     iter->evsel, al, max_stack_depth);
6746     @@ -1058,6 +1063,8 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
6747     if (!err)
6748     err = err2;
6749    
6750     + map__put(alm);
6751     +
6752     return err;
6753     }
6754    
6755     diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c
6756     index d918dcf26a5a..f138ed2e9c63 100644
6757     --- a/virt/kvm/arm/vgic/vgic-irqfd.c
6758     +++ b/virt/kvm/arm/vgic/vgic-irqfd.c
6759     @@ -99,6 +99,9 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
6760     if (!vgic_has_its(kvm))
6761     return -ENODEV;
6762    
6763     + if (!level)
6764     + return -1;
6765     +
6766     return vgic_its_inject_msi(kvm, &msi);
6767     }
6768