Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0303-4.9.204-all-fixes.patch



Revision 3578
Thu Aug 13 10:21:19 2020 UTC by niro
File size: 173877 bytes
linux-204
1 diff --git a/Documentation/hw-vuln/mds.rst b/Documentation/hw-vuln/mds.rst
2 index daf6fdac49a3..fbbd1719afb9 100644
3 --- a/Documentation/hw-vuln/mds.rst
4 +++ b/Documentation/hw-vuln/mds.rst
5 @@ -265,8 +265,11 @@ time with the option "mds=". The valid arguments for this option are:
6
7 ============ =============================================================
8
9 -Not specifying this option is equivalent to "mds=full".
10 -
11 +Not specifying this option is equivalent to "mds=full". For processors
12 +that are affected by both TAA (TSX Asynchronous Abort) and MDS,
13 +specifying just "mds=off" without an accompanying "tsx_async_abort=off"
14 +will have no effect as the same mitigation is used for both
15 +vulnerabilities.
16
17 Mitigation selection guide
18 --------------------------
19 diff --git a/Documentation/hw-vuln/tsx_async_abort.rst b/Documentation/hw-vuln/tsx_async_abort.rst
20 index fddbd7579c53..af6865b822d2 100644
21 --- a/Documentation/hw-vuln/tsx_async_abort.rst
22 +++ b/Documentation/hw-vuln/tsx_async_abort.rst
23 @@ -174,7 +174,10 @@ the option "tsx_async_abort=". The valid arguments for this option are:
24 CPU is not vulnerable to cross-thread TAA attacks.
25 ============ =============================================================
26
27 -Not specifying this option is equivalent to "tsx_async_abort=full".
28 +Not specifying this option is equivalent to "tsx_async_abort=full". For
29 +processors that are affected by both TAA and MDS, specifying just
30 +"tsx_async_abort=off" without an accompanying "mds=off" will have no
31 +effect as the same mitigation is used for both vulnerabilities.
32
33 The kernel command line also allows to control the TSX feature using the
34 parameter "tsx=" on CPUs which support TSX control. MSR_IA32_TSX_CTRL is used
35 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
36 index c81a008d6512..1bc12619bedd 100644
37 --- a/Documentation/kernel-parameters.txt
38 +++ b/Documentation/kernel-parameters.txt
39 @@ -2365,6 +2365,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
40 SMT on vulnerable CPUs
41 off - Unconditionally disable MDS mitigation
42
43 + On TAA-affected machines, mds=off can be prevented by
44 + an active TAA mitigation as both vulnerabilities are
45 + mitigated with the same mechanism so in order to disable
46 + this mitigation, you need to specify tsx_async_abort=off
47 + too.
48 +
49 Not specifying this option is equivalent to
50 mds=full.
51
52 @@ -4599,6 +4605,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
53 vulnerable to cross-thread TAA attacks.
54 off - Unconditionally disable TAA mitigation
55
56 + On MDS-affected machines, tsx_async_abort=off can be
57 + prevented by an active MDS mitigation as both vulnerabilities
58 + are mitigated with the same mechanism so in order to disable
59 + this mitigation, you need to specify mds=off too.
60 +
61 Not specifying this option is equivalent to
62 tsx_async_abort=full. On CPUs which are MDS affected
63 and deploy MDS mitigation, TAA mitigation is not
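
The documentation hunks above all make the same point: on CPUs affected by both MDS and TAA the VERW-based clearing is shared, so turning one mitigation off on the command line only takes effect when the other is turned off as well. As an illustration only (the kernel image path and the other boot parameters here are made up), a boot entry that really disables the shared mitigation has to pass both switches:

    linux /boot/vmlinuz-4.9.204 root=/dev/sda1 ro mds=off tsx_async_abort=off
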
64 diff --git a/Makefile b/Makefile
65 index 174c0e2526ac..0234869784fa 100644
66 --- a/Makefile
67 +++ b/Makefile
68 @@ -1,6 +1,6 @@
69 VERSION = 4
70 PATCHLEVEL = 9
71 -SUBLEVEL = 203
72 +SUBLEVEL = 204
73 EXTRAVERSION =
74 NAME = Roaring Lionus
75
76 diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
77 index 2ce24e74f879..a509b77ef80d 100644
78 --- a/arch/arc/kernel/perf_event.c
79 +++ b/arch/arc/kernel/perf_event.c
80 @@ -488,8 +488,8 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
81 /* loop thru all available h/w condition indexes */
82 for (j = 0; j < cc_bcr.c; j++) {
83 write_aux_reg(ARC_REG_CC_INDEX, j);
84 - cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
85 - cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
86 + cc_name.indiv.word0 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME0));
87 + cc_name.indiv.word1 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME1));
88
89 /* See if it has been mapped to a perf event_id */
90 for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
91 diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
92 index 241bf898adf5..7edc6c3f4bd9 100644
93 --- a/arch/arm/mm/mmu.c
94 +++ b/arch/arm/mm/mmu.c
95 @@ -1188,6 +1188,9 @@ void __init adjust_lowmem_bounds(void)
96 phys_addr_t block_start = reg->base;
97 phys_addr_t block_end = reg->base + reg->size;
98
99 + if (memblock_is_nomap(reg))
100 + continue;
101 +
102 if (reg->base < vmalloc_limit) {
103 if (block_end > lowmem_limit)
104 /*
105 diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
106 index ee94597773fa..8d469aa5fc98 100644
107 --- a/arch/arm64/Makefile
108 +++ b/arch/arm64/Makefile
109 @@ -134,6 +134,7 @@ archclean:
110 $(Q)$(MAKE) $(clean)=$(boot)
111 $(Q)$(MAKE) $(clean)=$(boot)/dts
112
113 +ifeq ($(KBUILD_EXTMOD),)
114 # We need to generate vdso-offsets.h before compiling certain files in kernel/.
115 # In order to do that, we should use the archprepare target, but we can't since
116 # asm-offsets.h is included in some files used to generate vdso-offsets.h, and
117 @@ -143,6 +144,7 @@ archclean:
118 prepare: vdso_prepare
119 vdso_prepare: prepare0
120 $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h
121 +endif
122
123 define archhelp
124 echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
125 diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
126 index 28bef94cf792..5962badb3346 100644
127 --- a/arch/arm64/kernel/traps.c
128 +++ b/arch/arm64/kernel/traps.c
129 @@ -611,7 +611,6 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
130 handler[reason], smp_processor_id(), esr,
131 esr_get_class_string(esr));
132
133 - die("Oops - bad mode", regs, 0);
134 local_irq_disable();
135 panic("bad mode");
136 }
137 diff --git a/arch/m68k/kernel/uboot.c b/arch/m68k/kernel/uboot.c
138 index b3536a82a262..e002084af101 100644
139 --- a/arch/m68k/kernel/uboot.c
140 +++ b/arch/m68k/kernel/uboot.c
141 @@ -103,5 +103,5 @@ __init void process_uboot_commandline(char *commandp, int size)
142 }
143
144 parse_uboot_commandline(commandp, len);
145 - commandp[size - 1] = 0;
146 + commandp[len - 1] = 0;
147 }
148 diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
149 index f3daa175f86c..c06cfdf12c0b 100644
150 --- a/arch/powerpc/include/asm/asm-prototypes.h
151 +++ b/arch/powerpc/include/asm/asm-prototypes.h
152 @@ -124,7 +124,10 @@ extern int __ucmpdi2(u64, u64);
153 /* Patch sites */
154 extern s32 patch__call_flush_count_cache;
155 extern s32 patch__flush_count_cache_return;
156 +extern s32 patch__flush_link_stack_return;
157 +extern s32 patch__call_kvm_flush_link_stack;
158
159 extern long flush_count_cache;
160 +extern long kvm_flush_link_stack;
161
162 #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
163 diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
164 index 759597bf0fd8..ccf44c135389 100644
165 --- a/arch/powerpc/include/asm/security_features.h
166 +++ b/arch/powerpc/include/asm/security_features.h
167 @@ -81,6 +81,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
168 // Software required to flush count cache on context switch
169 #define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull
170
171 +// Software required to flush link stack on context switch
172 +#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull
173 +
174
175 // Features enabled by default
176 #define SEC_FTR_DEFAULT \
177 diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
178 index 1abd8dd77ec1..eee2131a97e6 100644
179 --- a/arch/powerpc/kernel/eeh_pe.c
180 +++ b/arch/powerpc/kernel/eeh_pe.c
181 @@ -370,7 +370,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
182 while (parent) {
183 if (!(parent->type & EEH_PE_INVALID))
184 break;
185 - parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP);
186 + parent->type &= ~EEH_PE_INVALID;
187 parent = parent->parent;
188 }
189
190 diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
191 index 390ebf4ef384..38f0a75014eb 100644
192 --- a/arch/powerpc/kernel/entry_64.S
193 +++ b/arch/powerpc/kernel/entry_64.S
194 @@ -510,6 +510,7 @@ flush_count_cache:
195 /* Save LR into r9 */
196 mflr r9
197
198 + // Flush the link stack
199 .rept 64
200 bl .+4
201 .endr
202 @@ -519,6 +520,11 @@ flush_count_cache:
203 .balign 32
204 /* Restore LR */
205 1: mtlr r9
206 +
207 + // If we're just flushing the link stack, return here
208 +3: nop
209 + patch_site 3b patch__flush_link_stack_return
210 +
211 li r9,0x7fff
212 mtctr r9
213
214 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
215 index 47c6c0401b3a..54c95e7c74cc 100644
216 --- a/arch/powerpc/kernel/process.c
217 +++ b/arch/powerpc/kernel/process.c
218 @@ -576,12 +576,11 @@ void flush_all_to_thread(struct task_struct *tsk)
219 if (tsk->thread.regs) {
220 preempt_disable();
221 BUG_ON(tsk != current);
222 - save_all(tsk);
223 -
224 #ifdef CONFIG_SPE
225 if (tsk->thread.regs->msr & MSR_SPE)
226 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
227 #endif
228 + save_all(tsk);
229
230 preempt_enable();
231 }
232 diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
233 index f4a98d9c5913..11fff9669cfd 100644
234 --- a/arch/powerpc/kernel/security.c
235 +++ b/arch/powerpc/kernel/security.c
236 @@ -25,11 +25,12 @@ enum count_cache_flush_type {
237 COUNT_CACHE_FLUSH_HW = 0x4,
238 };
239 static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
240 +static bool link_stack_flush_enabled;
241
242 bool barrier_nospec_enabled;
243 static bool no_nospec;
244 static bool btb_flush_enabled;
245 -#ifdef CONFIG_PPC_FSL_BOOK3E
246 +#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
247 static bool no_spectrev2;
248 #endif
249
250 @@ -107,7 +108,7 @@ static __init int barrier_nospec_debugfs_init(void)
251 device_initcall(barrier_nospec_debugfs_init);
252 #endif /* CONFIG_DEBUG_FS */
253
254 -#ifdef CONFIG_PPC_FSL_BOOK3E
255 +#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
256 static int __init handle_nospectre_v2(char *p)
257 {
258 no_spectrev2 = true;
259 @@ -115,6 +116,9 @@ static int __init handle_nospectre_v2(char *p)
260 return 0;
261 }
262 early_param("nospectre_v2", handle_nospectre_v2);
263 +#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */
264 +
265 +#ifdef CONFIG_PPC_FSL_BOOK3E
266 void setup_spectre_v2(void)
267 {
268 if (no_spectrev2)
269 @@ -202,11 +206,19 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
270
271 if (ccd)
272 seq_buf_printf(&s, "Indirect branch cache disabled");
273 +
274 + if (link_stack_flush_enabled)
275 + seq_buf_printf(&s, ", Software link stack flush");
276 +
277 } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
278 seq_buf_printf(&s, "Mitigation: Software count cache flush");
279
280 if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
281 seq_buf_printf(&s, " (hardware accelerated)");
282 +
283 + if (link_stack_flush_enabled)
284 + seq_buf_printf(&s, ", Software link stack flush");
285 +
286 } else if (btb_flush_enabled) {
287 seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
288 } else {
289 @@ -367,18 +379,49 @@ static __init int stf_barrier_debugfs_init(void)
290 device_initcall(stf_barrier_debugfs_init);
291 #endif /* CONFIG_DEBUG_FS */
292
293 +static void no_count_cache_flush(void)
294 +{
295 + count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
296 + pr_info("count-cache-flush: software flush disabled.\n");
297 +}
298 +
299 static void toggle_count_cache_flush(bool enable)
300 {
301 - if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
302 + if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) &&
303 + !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK))
304 + enable = false;
305 +
306 + if (!enable) {
307 patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
308 - count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
309 - pr_info("count-cache-flush: software flush disabled.\n");
310 +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
311 + patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP);
312 +#endif
313 + pr_info("link-stack-flush: software flush disabled.\n");
314 + link_stack_flush_enabled = false;
315 + no_count_cache_flush();
316 return;
317 }
318
319 + // This enables the branch from _switch to flush_count_cache
320 patch_branch_site(&patch__call_flush_count_cache,
321 (u64)&flush_count_cache, BRANCH_SET_LINK);
322
323 +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
324 + // This enables the branch from guest_exit_cont to kvm_flush_link_stack
325 + patch_branch_site(&patch__call_kvm_flush_link_stack,
326 + (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
327 +#endif
328 +
329 + pr_info("link-stack-flush: software flush enabled.\n");
330 + link_stack_flush_enabled = true;
331 +
332 + // If we just need to flush the link stack, patch an early return
333 + if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
334 + patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR);
335 + no_count_cache_flush();
336 + return;
337 + }
338 +
339 if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
340 count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
341 pr_info("count-cache-flush: full software flush sequence enabled.\n");
342 @@ -392,7 +435,26 @@ static void toggle_count_cache_flush(bool enable)
343
344 void setup_count_cache_flush(void)
345 {
346 - toggle_count_cache_flush(true);
347 + bool enable = true;
348 +
349 + if (no_spectrev2 || cpu_mitigations_off()) {
350 + if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
351 + security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
352 + pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");
353 +
354 + enable = false;
355 + }
356 +
357 + /*
358 + * There's no firmware feature flag/hypervisor bit to tell us we need to
359 + * flush the link stack on context switch. So we set it here if we see
360 + * either of the Spectre v2 mitigations that aim to protect userspace.
361 + */
362 + if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
363 + security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
364 + security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);
365 +
366 + toggle_count_cache_flush(enable);
367 }
368
369 #ifdef CONFIG_DEBUG_FS
370 diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
371 index 79a180cf4c94..4b60bec20603 100644
372 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
373 +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
374 @@ -18,6 +18,7 @@
375 */
376
377 #include <asm/ppc_asm.h>
378 +#include <asm/code-patching-asm.h>
379 #include <asm/kvm_asm.h>
380 #include <asm/reg.h>
381 #include <asm/mmu.h>
382 @@ -1266,6 +1267,10 @@ mc_cont:
383 bl kvmhv_accumulate_time
384 #endif
385
386 + /* Possibly flush the link stack here. */
387 +1: nop
388 + patch_site 1b patch__call_kvm_flush_link_stack
389 +
390 stw r12, STACK_SLOT_TRAP(r1)
391 mr r3, r12
392 /* Increment exit count, poke other threads to exit */
393 @@ -1685,6 +1690,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
394 mtlr r0
395 blr
396
397 +.balign 32
398 +.global kvm_flush_link_stack
399 +kvm_flush_link_stack:
400 + /* Save LR into r0 */
401 + mflr r0
402 +
403 + /* Flush the link stack. On Power8 it's up to 32 entries in size. */
404 + .rept 32
405 + bl .+4
406 + .endr
407 +
408 + /* And on Power9 it's up to 64. */
409 +BEGIN_FTR_SECTION
410 + .rept 32
411 + bl .+4
412 + .endr
413 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
414 +
415 + /* Restore LR */
416 + mtlr r0
417 + blr
418 +
419 /*
420 * Check whether an HDSI is an HPTE not found fault or something else.
421 * If it is an HPTE not found fault that is due to the guest accessing
422 diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
423 index 3db53e8aff92..9b2ef76578f0 100644
424 --- a/arch/powerpc/platforms/ps3/os-area.c
425 +++ b/arch/powerpc/platforms/ps3/os-area.c
426 @@ -664,7 +664,7 @@ static int update_flash_db(void)
427 db_set_64(db, &os_area_db_id_rtc_diff, saved_params.rtc_diff);
428
429 count = os_area_flash_write(db, sizeof(struct os_area_db), pos);
430 - if (count < sizeof(struct os_area_db)) {
431 + if (count < 0 || count < sizeof(struct os_area_db)) {
432 pr_debug("%s: os_area_flash_write failed %zd\n", __func__,
433 count);
434 error = count < 0 ? count : -EIO;
435 diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
436 index c0a0947f43bb..656bbbd731d0 100644
437 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c
438 +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
439 @@ -616,7 +616,7 @@ static int dlpar_add_lmb(struct of_drconf_cell *lmb)
440 nid = memory_add_physaddr_to_nid(lmb->base_addr);
441
442 /* Add the memory */
443 - rc = add_memory(nid, lmb->base_addr, block_sz);
444 + rc = __add_memory(nid, lmb->base_addr, block_sz);
445 if (rc) {
446 dlpar_remove_device_tree_lmb(lmb);
447 dlpar_release_drc(lmb->drc_index);
448 diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
449 index 96e4fcad57bf..f46e5c0cb6d9 100644
450 --- a/arch/s390/kernel/perf_cpum_sf.c
451 +++ b/arch/s390/kernel/perf_cpum_sf.c
452 @@ -1611,14 +1611,17 @@ static int __init init_cpum_sampling_pmu(void)
453 }
454
455 sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
456 - if (!sfdbg)
457 + if (!sfdbg) {
458 pr_err("Registering for s390dbf failed\n");
459 + return -ENOMEM;
460 + }
461 debug_register_view(sfdbg, &debug_sprintf_view);
462
463 err = register_external_irq(EXT_IRQ_MEASURE_ALERT,
464 cpumf_measurement_alert);
465 if (err) {
466 pr_cpumsf_err(RS_INIT_FAILURE_ALRT);
467 + debug_unregister(sfdbg);
468 goto out;
469 }
470
471 @@ -1627,6 +1630,7 @@ static int __init init_cpum_sampling_pmu(void)
472 pr_cpumsf_err(RS_INIT_FAILURE_PERF);
473 unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
474 cpumf_measurement_alert);
475 + debug_unregister(sfdbg);
476 goto out;
477 }
478
479 diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
480 index faa2f61058c2..92f0a46ace78 100644
481 --- a/arch/sparc/include/asm/cmpxchg_64.h
482 +++ b/arch/sparc/include/asm/cmpxchg_64.h
483 @@ -40,7 +40,12 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
484 return val;
485 }
486
487 -#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
488 +#define xchg(ptr,x) \
489 +({ __typeof__(*(ptr)) __ret; \
490 + __ret = (__typeof__(*(ptr))) \
491 + __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
492 + __ret; \
493 +})
494
495 void __xchg_called_with_bad_pointer(void);
496
497 diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h
498 index f005ccac91cc..e87c0f81b700 100644
499 --- a/arch/sparc/include/asm/parport.h
500 +++ b/arch/sparc/include/asm/parport.h
501 @@ -20,6 +20,7 @@
502 */
503 #define HAS_DMA
504
505 +#ifdef CONFIG_PARPORT_PC_FIFO
506 static DEFINE_SPINLOCK(dma_spin_lock);
507
508 #define claim_dma_lock() \
509 @@ -30,6 +31,7 @@ static DEFINE_SPINLOCK(dma_spin_lock);
510
511 #define release_dma_lock(__flags) \
512 spin_unlock_irqrestore(&dma_spin_lock, __flags);
513 +#endif
514
515 static struct sparc_ebus_info {
516 struct ebus_dma_info info;
517 diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
518 index 62087028a9ce..d2ad45c10113 100644
519 --- a/arch/um/drivers/line.c
520 +++ b/arch/um/drivers/line.c
521 @@ -260,7 +260,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data)
522 if (err == 0) {
523 spin_unlock(&line->lock);
524 return IRQ_NONE;
525 - } else if (err < 0) {
526 + } else if ((err < 0) && (err != -EAGAIN)) {
527 line->head = line->buffer;
528 line->tail = line->buffer;
529 }
530 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
531 index ea78a8438a8a..fb489cd848fa 100644
532 --- a/arch/x86/include/asm/ptrace.h
533 +++ b/arch/x86/include/asm/ptrace.h
534 @@ -199,24 +199,52 @@ static inline int regs_within_kernel_stack(struct pt_regs *regs,
535 (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
536 }
537
538 +/**
539 + * regs_get_kernel_stack_nth_addr() - get the address of the Nth entry on stack
540 + * @regs: pt_regs which contains kernel stack pointer.
541 + * @n: stack entry number.
542 + *
543 + * regs_get_kernel_stack_nth() returns the address of the @n th entry of the
544 + * kernel stack which is specified by @regs. If the @n th entry is NOT in
545 + * the kernel stack, this returns NULL.
546 + */
547 +static inline unsigned long *regs_get_kernel_stack_nth_addr(struct pt_regs *regs, unsigned int n)
548 +{
549 + unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
550 +
551 + addr += n;
552 + if (regs_within_kernel_stack(regs, (unsigned long)addr))
553 + return addr;
554 + else
555 + return NULL;
556 +}
557 +
558 +/* To avoid include hell, we can't include uaccess.h */
559 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
560 +
561 /**
562 * regs_get_kernel_stack_nth() - get Nth entry of the stack
563 * @regs: pt_regs which contains kernel stack pointer.
564 * @n: stack entry number.
565 *
566 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
567 - * is specified by @regs. If the @n th entry is NOT in the kernel stack,
568 + * is specified by @regs. If the @n th entry is NOT in the kernel stack
569 * this returns 0.
570 */
571 static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
572 unsigned int n)
573 {
574 - unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
575 - addr += n;
576 - if (regs_within_kernel_stack(regs, (unsigned long)addr))
577 - return *addr;
578 - else
579 - return 0;
580 + unsigned long *addr;
581 + unsigned long val;
582 + long ret;
583 +
584 + addr = regs_get_kernel_stack_nth_addr(regs, n);
585 + if (addr) {
586 + ret = probe_kernel_read(&val, addr, sizeof(val));
587 + if (!ret)
588 + return val;
589 + }
590 + return 0;
591 }
592
593 #define arch_has_single_step() (1)
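
The ptrace.h change above stops dereferencing the computed stack slot directly and instead goes through probe_kernel_read(), which copies the value out and reports failure rather than faulting if the entry cannot be read. A userspace-only sketch of the same shape, with a plain bounds check and memcpy() standing in for the kernel helpers (illustrative, not kernel code):

    #include <stdio.h>
    #include <string.h>

    #define STACK_WORDS 32

    static unsigned long fake_stack[STACK_WORDS];

    static unsigned long stack_nth(unsigned int n)
    {
            unsigned long val;

            /* the kernel checks the computed address against the stack range */
            if (n >= STACK_WORDS)
                    return 0;
            /* in the kernel this copy is probe_kernel_read(), which also
             * survives a fault on a concurrently changing stack */
            memcpy(&val, fake_stack + n, sizeof(val));
            return val;
    }

    int main(void)
    {
            fake_stack[3] = 42;
            printf("%lu %lu\n", stack_nth(3), stack_nth(1000));
            return 0;
    }
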
594 diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
595 index 827fc38df97a..24307d5bb4b8 100644
596 --- a/arch/x86/kernel/cpu/bugs.c
597 +++ b/arch/x86/kernel/cpu/bugs.c
598 @@ -38,6 +38,7 @@ static void __init spectre_v2_select_mitigation(void);
599 static void __init ssb_select_mitigation(void);
600 static void __init l1tf_select_mitigation(void);
601 static void __init mds_select_mitigation(void);
602 +static void __init mds_print_mitigation(void);
603 static void __init taa_select_mitigation(void);
604
605 /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
606 @@ -107,6 +108,12 @@ void __init check_bugs(void)
607 mds_select_mitigation();
608 taa_select_mitigation();
609
610 + /*
611 + * As MDS and TAA mitigations are inter-related, print MDS
612 + * mitigation until after TAA mitigation selection is done.
613 + */
614 + mds_print_mitigation();
615 +
616 arch_smt_update();
617
618 #ifdef CONFIG_X86_32
619 @@ -244,6 +251,12 @@ static void __init mds_select_mitigation(void)
620 (mds_nosmt || cpu_mitigations_auto_nosmt()))
621 cpu_smt_disable(false);
622 }
623 +}
624 +
625 +static void __init mds_print_mitigation(void)
626 +{
627 + if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
628 + return;
629
630 pr_info("%s\n", mds_strings[mds_mitigation]);
631 }
632 @@ -303,8 +316,12 @@ static void __init taa_select_mitigation(void)
633 return;
634 }
635
636 - /* TAA mitigation is turned off on the cmdline (tsx_async_abort=off) */
637 - if (taa_mitigation == TAA_MITIGATION_OFF)
638 + /*
639 + * TAA mitigation via VERW is turned off if both
640 + * tsx_async_abort=off and mds=off are specified.
641 + */
642 + if (taa_mitigation == TAA_MITIGATION_OFF &&
643 + mds_mitigation == MDS_MITIGATION_OFF)
644 goto out;
645
646 if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
647 @@ -338,6 +355,15 @@ static void __init taa_select_mitigation(void)
648 if (taa_nosmt || cpu_mitigations_auto_nosmt())
649 cpu_smt_disable(false);
650
651 + /*
652 + * Update MDS mitigation, if necessary, as the mds_user_clear is
653 + * now enabled for TAA mitigation.
654 + */
655 + if (mds_mitigation == MDS_MITIGATION_OFF &&
656 + boot_cpu_has_bug(X86_BUG_MDS)) {
657 + mds_mitigation = MDS_MITIGATION_FULL;
658 + mds_select_mitigation();
659 + }
660 out:
661 pr_info("%s\n", taa_strings[taa_mitigation]);
662 }
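
The bugs.c hunks implement the interlock described in the documentation changes at the top of this patch: TAA mitigation is only treated as off when mds=off is given as well, and selecting the TAA mitigation on an MDS-affected CPU promotes the MDS mitigation back to full so the shared VERW clearing is reported consistently. A toy model of that decision logic, with simplified names that are not the kernel's:

    #include <stdio.h>

    enum mit { MIT_OFF, MIT_FULL };

    struct state {
            enum mit mds;
            enum mit taa;
    };

    /* loosely mirrors taa_select_mitigation(): "off" only sticks when mds is
     * off too, and enabling TAA pulls MDS back to full on an affected CPU */
    static void select_taa(struct state *s, int cpu_has_mds_bug)
    {
            if (s->taa == MIT_OFF && s->mds == MIT_OFF)
                    return;
            s->taa = MIT_FULL;
            if (s->mds == MIT_OFF && cpu_has_mds_bug)
                    s->mds = MIT_FULL;
    }

    int main(void)
    {
            struct state only_taa_off = { .mds = MIT_FULL, .taa = MIT_OFF };
            struct state both_off     = { .mds = MIT_OFF,  .taa = MIT_OFF };

            select_taa(&only_taa_off, 1);
            select_taa(&both_off, 1);
            printf("tsx_async_abort=off alone -> %s\n",
                   only_taa_off.taa == MIT_FULL ? "still mitigated" : "off");
            printf("both off                  -> %s\n",
                   both_off.taa == MIT_FULL ? "still mitigated" : "off");
            return 0;
    }
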
663 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
664 index f0f180158c26..3a281a2decde 100644
665 --- a/arch/x86/kvm/mmu.c
666 +++ b/arch/x86/kvm/mmu.c
667 @@ -2934,7 +2934,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
668 * here.
669 */
670 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
671 - level == PT_PAGE_TABLE_LEVEL &&
672 + !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
673 PageTransCompoundMap(pfn_to_page(pfn)) &&
674 !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
675 unsigned long mask;
676 @@ -4890,9 +4890,9 @@ restart:
677 * the guest, and the guest page table is using 4K page size
678 * mapping if the indirect sp has level = 1.
679 */
680 - if (sp->role.direct &&
681 - !kvm_is_reserved_pfn(pfn) &&
682 - PageTransCompoundMap(pfn_to_page(pfn))) {
683 + if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
684 + !kvm_is_zone_device_pfn(pfn) &&
685 + PageTransCompoundMap(pfn_to_page(pfn))) {
686 drop_spte(kvm, sptep);
687 need_tlb_flush = 1;
688 goto restart;
689 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
690 index 4c0d6d0d6337..f76caa03f4f8 100644
691 --- a/arch/x86/kvm/vmx.c
692 +++ b/arch/x86/kvm/vmx.c
693 @@ -1547,7 +1547,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
694 return -1;
695 }
696
697 -static inline void __invvpid(int ext, u16 vpid, gva_t gva)
698 +static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
699 {
700 struct {
701 u64 vpid : 16;
702 @@ -1561,7 +1561,7 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva)
703 : : "a"(&operand), "c"(ext) : "cc", "memory");
704 }
705
706 -static inline void __invept(int ext, u64 eptp, gpa_t gpa)
707 +static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
708 {
709 struct {
710 u64 eptp, gpa;
711 diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
712 index a3d2c62fd805..0a3ad5dd1e8b 100644
713 --- a/arch/x86/tools/gen-insn-attr-x86.awk
714 +++ b/arch/x86/tools/gen-insn-attr-x86.awk
715 @@ -68,7 +68,7 @@ BEGIN {
716
717 lprefix1_expr = "\\((66|!F3)\\)"
718 lprefix2_expr = "\\(F3\\)"
719 - lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
720 + lprefix3_expr = "\\((F2|!F3|66&F2)\\)"
721 lprefix_expr = "\\((66|F2|F3)\\)"
722 max_lprefix = 4
723
724 @@ -256,7 +256,7 @@ function convert_operands(count,opnd, i,j,imm,mod)
725 return add_flags(imm, mod)
726 }
727
728 -/^[0-9a-f]+\:/ {
729 +/^[0-9a-f]+:/ {
730 if (NR == 1)
731 next
732 # get index
733 diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
734 index 6b0d3ef7309c..2ccfbb61ca89 100644
735 --- a/drivers/acpi/acpi_memhotplug.c
736 +++ b/drivers/acpi/acpi_memhotplug.c
737 @@ -228,7 +228,7 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
738 if (node < 0)
739 node = memory_add_physaddr_to_nid(info->start_addr);
740
741 - result = add_memory(node, info->start_addr, info->length);
742 + result = __add_memory(node, info->start_addr, info->length);
743
744 /*
745 * If the memory block has been used by the kernel, add_memory()
746 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
747 index a0b88f148990..e23e2672a1d6 100644
748 --- a/drivers/atm/zatm.c
749 +++ b/drivers/atm/zatm.c
750 @@ -126,7 +126,7 @@ static unsigned long dummy[2] = {0,0};
751 #define zin_n(r) inl(zatm_dev->base+r*4)
752 #define zin(r) inl(zatm_dev->base+uPD98401_##r*4)
753 #define zout(v,r) outl(v,zatm_dev->base+uPD98401_##r*4)
754 -#define zwait while (zin(CMR) & uPD98401_BUSY)
755 +#define zwait() do {} while (zin(CMR) & uPD98401_BUSY)
756
757 /* RX0, RX1, TX0, TX1 */
758 static const int mbx_entries[NR_MBX] = { 1024,1024,1024,1024 };
759 @@ -140,7 +140,7 @@ static const int mbx_esize[NR_MBX] = { 16,16,4,4 }; /* entry size in bytes */
760
761 static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr)
762 {
763 - zwait;
764 + zwait();
765 zout(value,CER);
766 zout(uPD98401_IND_ACC | uPD98401_IA_BALL |
767 (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
768 @@ -149,10 +149,10 @@ static void zpokel(struct zatm_dev *zatm_dev,u32 value,u32 addr)
769
770 static u32 zpeekl(struct zatm_dev *zatm_dev,u32 addr)
771 {
772 - zwait;
773 + zwait();
774 zout(uPD98401_IND_ACC | uPD98401_IA_BALL | uPD98401_IA_RW |
775 (uPD98401_IA_TGT_CM << uPD98401_IA_TGT_SHIFT) | addr,CMR);
776 - zwait;
777 + zwait();
778 return zin(CER);
779 }
780
781 @@ -241,7 +241,7 @@ static void refill_pool(struct atm_dev *dev,int pool)
782 }
783 if (first) {
784 spin_lock_irqsave(&zatm_dev->lock, flags);
785 - zwait;
786 + zwait();
787 zout(virt_to_bus(first),CER);
788 zout(uPD98401_ADD_BAT | (pool << uPD98401_POOL_SHIFT) | count,
789 CMR);
790 @@ -508,9 +508,9 @@ static int open_rx_first(struct atm_vcc *vcc)
791 }
792 if (zatm_vcc->pool < 0) return -EMSGSIZE;
793 spin_lock_irqsave(&zatm_dev->lock, flags);
794 - zwait;
795 + zwait();
796 zout(uPD98401_OPEN_CHAN,CMR);
797 - zwait;
798 + zwait();
799 DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
800 chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
801 spin_unlock_irqrestore(&zatm_dev->lock, flags);
802 @@ -571,21 +571,21 @@ static void close_rx(struct atm_vcc *vcc)
803 pos = vcc->vci >> 1;
804 shift = (1-(vcc->vci & 1)) << 4;
805 zpokel(zatm_dev,zpeekl(zatm_dev,pos) & ~(0xffff << shift),pos);
806 - zwait;
807 + zwait();
808 zout(uPD98401_NOP,CMR);
809 - zwait;
810 + zwait();
811 zout(uPD98401_NOP,CMR);
812 spin_unlock_irqrestore(&zatm_dev->lock, flags);
813 }
814 spin_lock_irqsave(&zatm_dev->lock, flags);
815 - zwait;
816 + zwait();
817 zout(uPD98401_DEACT_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
818 uPD98401_CHAN_ADDR_SHIFT),CMR);
819 - zwait;
820 + zwait();
821 udelay(10); /* why oh why ... ? */
822 zout(uPD98401_CLOSE_CHAN | uPD98401_CHAN_RT | (zatm_vcc->rx_chan <<
823 uPD98401_CHAN_ADDR_SHIFT),CMR);
824 - zwait;
825 + zwait();
826 if (!(zin(CMR) & uPD98401_CHAN_ADDR))
827 printk(KERN_CRIT DEV_LABEL "(itf %d): can't close RX channel "
828 "%d\n",vcc->dev->number,zatm_vcc->rx_chan);
829 @@ -699,7 +699,7 @@ printk("NONONONOO!!!!\n");
830 skb_queue_tail(&zatm_vcc->tx_queue,skb);
831 DPRINTK("QRP=0x%08lx\n",zpeekl(zatm_dev,zatm_vcc->tx_chan*VC_SIZE/4+
832 uPD98401_TXVC_QRP));
833 - zwait;
834 + zwait();
835 zout(uPD98401_TX_READY | (zatm_vcc->tx_chan <<
836 uPD98401_CHAN_ADDR_SHIFT),CMR);
837 spin_unlock_irqrestore(&zatm_dev->lock, flags);
838 @@ -891,12 +891,12 @@ static void close_tx(struct atm_vcc *vcc)
839 }
840 spin_lock_irqsave(&zatm_dev->lock, flags);
841 #if 0
842 - zwait;
843 + zwait();
844 zout(uPD98401_DEACT_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
845 #endif
846 - zwait;
847 + zwait();
848 zout(uPD98401_CLOSE_CHAN | (chan << uPD98401_CHAN_ADDR_SHIFT),CMR);
849 - zwait;
850 + zwait();
851 if (!(zin(CMR) & uPD98401_CHAN_ADDR))
852 printk(KERN_CRIT DEV_LABEL "(itf %d): can't close TX channel "
853 "%d\n",vcc->dev->number,chan);
854 @@ -926,9 +926,9 @@ static int open_tx_first(struct atm_vcc *vcc)
855 zatm_vcc->tx_chan = 0;
856 if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
857 spin_lock_irqsave(&zatm_dev->lock, flags);
858 - zwait;
859 + zwait();
860 zout(uPD98401_OPEN_CHAN,CMR);
861 - zwait;
862 + zwait();
863 DPRINTK("0x%x 0x%x\n",zin(CMR),zin(CER));
864 chan = (zin(CMR) & uPD98401_CHAN_ADDR) >> uPD98401_CHAN_ADDR_SHIFT;
865 spin_unlock_irqrestore(&zatm_dev->lock, flags);
866 @@ -1559,7 +1559,7 @@ static void zatm_phy_put(struct atm_dev *dev,unsigned char value,
867 struct zatm_dev *zatm_dev;
868
869 zatm_dev = ZATM_DEV(dev);
870 - zwait;
871 + zwait();
872 zout(value,CER);
873 zout(uPD98401_IND_ACC | uPD98401_IA_B0 |
874 (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
875 @@ -1571,10 +1571,10 @@ static unsigned char zatm_phy_get(struct atm_dev *dev,unsigned long addr)
876 struct zatm_dev *zatm_dev;
877
878 zatm_dev = ZATM_DEV(dev);
879 - zwait;
880 + zwait();
881 zout(uPD98401_IND_ACC | uPD98401_IA_B0 | uPD98401_IA_RW |
882 (uPD98401_IA_TGT_PHY << uPD98401_IA_TGT_SHIFT) | addr,CMR);
883 - zwait;
884 + zwait();
885 return zin(CER) & 0xff;
886 }
887
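
The zatm changes convert the statement-like zwait macro into a function-like zwait() whose body is a complete do { } while (...) statement, so every call site reads as an ordinary call and a following statement can never be captured as the loop body. A small standalone sketch of the resulting busy-wait form, where busy() and the poll counter are invented for illustration:

    #include <stdio.h>

    static int polls;

    static int busy(void)
    {
            return ++polls < 3;     /* pretend the device is busy for two polls */
    }

    #define zwait() do { } while (busy())

    int main(void)
    {
            zwait();                /* spins until busy() first returns false */
            printf("polled %d times\n", polls);
            return 0;
    }
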
888 diff --git a/drivers/base/memory.c b/drivers/base/memory.c
889 index c5cdd190b781..6a3694a4843f 100644
890 --- a/drivers/base/memory.c
891 +++ b/drivers/base/memory.c
892 @@ -500,15 +500,20 @@ memory_probe_store(struct device *dev, struct device_attribute *attr,
893 if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
894 return -EINVAL;
895
896 + ret = lock_device_hotplug_sysfs();
897 + if (ret)
898 + return ret;
899 +
900 nid = memory_add_physaddr_to_nid(phys_addr);
901 - ret = add_memory(nid, phys_addr,
902 - MIN_MEMORY_BLOCK_SIZE * sections_per_block);
903 + ret = __add_memory(nid, phys_addr,
904 + MIN_MEMORY_BLOCK_SIZE * sections_per_block);
905
906 if (ret)
907 goto out;
908
909 ret = count;
910 out:
911 + unlock_device_hotplug();
912 return ret;
913 }
914
915 diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
916 index 5fd50a284168..db4354fb2a0d 100644
917 --- a/drivers/block/amiflop.c
918 +++ b/drivers/block/amiflop.c
919 @@ -1699,11 +1699,41 @@ static const struct block_device_operations floppy_fops = {
920 .check_events = amiga_check_events,
921 };
922
923 +static struct gendisk *fd_alloc_disk(int drive)
924 +{
925 + struct gendisk *disk;
926 +
927 + disk = alloc_disk(1);
928 + if (!disk)
929 + goto out;
930 +
931 + disk->queue = blk_init_queue(do_fd_request, &amiflop_lock);
932 + if (IS_ERR(disk->queue)) {
933 + disk->queue = NULL;
934 + goto out_put_disk;
935 + }
936 +
937 + unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL);
938 + if (!unit[drive].trackbuf)
939 + goto out_cleanup_queue;
940 +
941 + return disk;
942 +
943 +out_cleanup_queue:
944 + blk_cleanup_queue(disk->queue);
945 + disk->queue = NULL;
946 +out_put_disk:
947 + put_disk(disk);
948 +out:
949 + unit[drive].type->code = FD_NODRIVE;
950 + return NULL;
951 +}
952 +
953 static int __init fd_probe_drives(void)
954 {
955 int drive,drives,nomem;
956
957 - printk(KERN_INFO "FD: probing units\nfound ");
958 + pr_info("FD: probing units\nfound");
959 drives=0;
960 nomem=0;
961 for(drive=0;drive<FD_MAX_UNITS;drive++) {
962 @@ -1711,27 +1741,17 @@ static int __init fd_probe_drives(void)
963 fd_probe(drive);
964 if (unit[drive].type->code == FD_NODRIVE)
965 continue;
966 - disk = alloc_disk(1);
967 +
968 + disk = fd_alloc_disk(drive);
969 if (!disk) {
970 - unit[drive].type->code = FD_NODRIVE;
971 + pr_cont(" no mem for fd%d", drive);
972 + nomem = 1;
973 continue;
974 }
975 unit[drive].gendisk = disk;
976 -
977 - disk->queue = blk_init_queue(do_fd_request, &amiflop_lock);
978 - if (!disk->queue) {
979 - unit[drive].type->code = FD_NODRIVE;
980 - continue;
981 - }
982 -
983 drives++;
984 - if ((unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL)) == NULL) {
985 - printk("no mem for ");
986 - unit[drive].type = &drive_types[num_dr_types - 1]; /* FD_NODRIVE */
987 - drives--;
988 - nomem = 1;
989 - }
990 - printk("fd%d ",drive);
991 +
992 + pr_cont(" fd%d",drive);
993 disk->major = FLOPPY_MAJOR;
994 disk->first_minor = drive;
995 disk->fops = &floppy_fops;
996 @@ -1742,11 +1762,11 @@ static int __init fd_probe_drives(void)
997 }
998 if ((drives > 0) || (nomem == 0)) {
999 if (drives == 0)
1000 - printk("no drives");
1001 - printk("\n");
1002 + pr_cont(" no drives");
1003 + pr_cont("\n");
1004 return drives;
1005 }
1006 - printk("\n");
1007 + pr_cont("\n");
1008 return -ENOMEM;
1009 }
1010
1011 @@ -1837,30 +1857,6 @@ out_blkdev:
1012 return ret;
1013 }
1014
1015 -#if 0 /* not safe to unload */
1016 -static int __exit amiga_floppy_remove(struct platform_device *pdev)
1017 -{
1018 - int i;
1019 -
1020 - for( i = 0; i < FD_MAX_UNITS; i++) {
1021 - if (unit[i].type->code != FD_NODRIVE) {
1022 - struct request_queue *q = unit[i].gendisk->queue;
1023 - del_gendisk(unit[i].gendisk);
1024 - put_disk(unit[i].gendisk);
1025 - kfree(unit[i].trackbuf);
1026 - if (q)
1027 - blk_cleanup_queue(q);
1028 - }
1029 - }
1030 - blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
1031 - free_irq(IRQ_AMIGA_CIAA_TB, NULL);
1032 - free_irq(IRQ_AMIGA_DSKBLK, NULL);
1033 - custom.dmacon = DMAF_DISK; /* disable DMA */
1034 - amiga_chip_free(raw_buf);
1035 - unregister_blkdev(FLOPPY_MAJOR, "fd");
1036 -}
1037 -#endif
1038 -
1039 static struct platform_driver amiga_floppy_driver = {
1040 .driver = {
1041 .name = "amiga-floppy",
1042 diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
1043 index 34e04bf87a62..26f9982bab26 100644
1044 --- a/drivers/bluetooth/hci_bcsp.c
1045 +++ b/drivers/bluetooth/hci_bcsp.c
1046 @@ -605,6 +605,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
1047 if (*ptr == 0xc0) {
1048 BT_ERR("Short BCSP packet");
1049 kfree_skb(bcsp->rx_skb);
1050 + bcsp->rx_skb = NULL;
1051 bcsp->rx_state = BCSP_W4_PKT_START;
1052 bcsp->rx_count = 0;
1053 } else
1054 @@ -620,6 +621,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
1055 bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) {
1056 BT_ERR("Error in BCSP hdr checksum");
1057 kfree_skb(bcsp->rx_skb);
1058 + bcsp->rx_skb = NULL;
1059 bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
1060 bcsp->rx_count = 0;
1061 continue;
1062 @@ -644,6 +646,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
1063 bscp_get_crc(bcsp));
1064
1065 kfree_skb(bcsp->rx_skb);
1066 + bcsp->rx_skb = NULL;
1067 bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
1068 bcsp->rx_count = 0;
1069 continue;
1070 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
1071 index 800ced0a5a24..34548d3b4d13 100644
1072 --- a/drivers/char/virtio_console.c
1073 +++ b/drivers/char/virtio_console.c
1074 @@ -422,7 +422,7 @@ static void reclaim_dma_bufs(void)
1075 }
1076 }
1077
1078 -static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
1079 +static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size,
1080 int pages)
1081 {
1082 struct port_buffer *buf;
1083 @@ -445,7 +445,7 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
1084 return buf;
1085 }
1086
1087 - if (is_rproc_serial(vq->vdev)) {
1088 + if (is_rproc_serial(vdev)) {
1089 /*
1090 * Allocate DMA memory from ancestor. When a virtio
1091 * device is created by remoteproc, the DMA memory is
1092 @@ -455,9 +455,9 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
1093 * DMA_MEMORY_INCLUDES_CHILDREN had been supported
1094 * in dma-coherent.c
1095 */
1096 - if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent)
1097 + if (!vdev->dev.parent || !vdev->dev.parent->parent)
1098 goto free_buf;
1099 - buf->dev = vq->vdev->dev.parent->parent;
1100 + buf->dev = vdev->dev.parent->parent;
1101
1102 /* Increase device refcnt to avoid freeing it */
1103 get_device(buf->dev);
1104 @@ -841,7 +841,7 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
1105
1106 count = min((size_t)(32 * 1024), count);
1107
1108 - buf = alloc_buf(port->out_vq, count, 0);
1109 + buf = alloc_buf(port->portdev->vdev, count, 0);
1110 if (!buf)
1111 return -ENOMEM;
1112
1113 @@ -960,7 +960,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
1114 if (ret < 0)
1115 goto error_out;
1116
1117 - buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
1118 + buf = alloc_buf(port->portdev->vdev, 0, pipe->nrbufs);
1119 if (!buf) {
1120 ret = -ENOMEM;
1121 goto error_out;
1122 @@ -1369,24 +1369,24 @@ static void set_console_size(struct port *port, u16 rows, u16 cols)
1123 port->cons.ws.ws_col = cols;
1124 }
1125
1126 -static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
1127 +static int fill_queue(struct virtqueue *vq, spinlock_t *lock)
1128 {
1129 struct port_buffer *buf;
1130 - unsigned int nr_added_bufs;
1131 + int nr_added_bufs;
1132 int ret;
1133
1134 nr_added_bufs = 0;
1135 do {
1136 - buf = alloc_buf(vq, PAGE_SIZE, 0);
1137 + buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
1138 if (!buf)
1139 - break;
1140 + return -ENOMEM;
1141
1142 spin_lock_irq(lock);
1143 ret = add_inbuf(vq, buf);
1144 if (ret < 0) {
1145 spin_unlock_irq(lock);
1146 free_buf(buf, true);
1147 - break;
1148 + return ret;
1149 }
1150 nr_added_bufs++;
1151 spin_unlock_irq(lock);
1152 @@ -1406,7 +1406,6 @@ static int add_port(struct ports_device *portdev, u32 id)
1153 char debugfs_name[16];
1154 struct port *port;
1155 dev_t devt;
1156 - unsigned int nr_added_bufs;
1157 int err;
1158
1159 port = kmalloc(sizeof(*port), GFP_KERNEL);
1160 @@ -1465,11 +1464,13 @@ static int add_port(struct ports_device *portdev, u32 id)
1161 spin_lock_init(&port->outvq_lock);
1162 init_waitqueue_head(&port->waitqueue);
1163
1164 - /* Fill the in_vq with buffers so the host can send us data. */
1165 - nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
1166 - if (!nr_added_bufs) {
1167 + /* We can safely ignore ENOSPC because it means
1168 + * the queue already has buffers. Buffers are removed
1169 + * only by virtcons_remove(), not by unplug_port()
1170 + */
1171 + err = fill_queue(port->in_vq, &port->inbuf_lock);
1172 + if (err < 0 && err != -ENOSPC) {
1173 dev_err(port->dev, "Error allocating inbufs\n");
1174 - err = -ENOMEM;
1175 goto free_device;
1176 }
1177
1178 @@ -1992,19 +1993,40 @@ static void remove_vqs(struct ports_device *portdev)
1179 kfree(portdev->out_vqs);
1180 }
1181
1182 -static void remove_controlq_data(struct ports_device *portdev)
1183 +static void virtcons_remove(struct virtio_device *vdev)
1184 {
1185 - struct port_buffer *buf;
1186 - unsigned int len;
1187 + struct ports_device *portdev;
1188 + struct port *port, *port2;
1189
1190 - if (!use_multiport(portdev))
1191 - return;
1192 + portdev = vdev->priv;
1193
1194 - while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
1195 - free_buf(buf, true);
1196 + spin_lock_irq(&pdrvdata_lock);
1197 + list_del(&portdev->list);
1198 + spin_unlock_irq(&pdrvdata_lock);
1199
1200 - while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
1201 - free_buf(buf, true);
1202 + /* Disable interrupts for vqs */
1203 + vdev->config->reset(vdev);
1204 + /* Finish up work that's lined up */
1205 + if (use_multiport(portdev))
1206 + cancel_work_sync(&portdev->control_work);
1207 + else
1208 + cancel_work_sync(&portdev->config_work);
1209 +
1210 + list_for_each_entry_safe(port, port2, &portdev->ports, list)
1211 + unplug_port(port);
1212 +
1213 + unregister_chrdev(portdev->chr_major, "virtio-portsdev");
1214 +
1215 + /*
1216 + * When yanking out a device, we immediately lose the
1217 + * (device-side) queues. So there's no point in keeping the
1218 + * guest side around till we drop our final reference. This
1219 + * also means that any ports which are in an open state will
1220 + * have to just stop using the port, as the vqs are going
1221 + * away.
1222 + */
1223 + remove_vqs(portdev);
1224 + kfree(portdev);
1225 }
1226
1227 /*
1228 @@ -2073,6 +2095,7 @@ static int virtcons_probe(struct virtio_device *vdev)
1229
1230 spin_lock_init(&portdev->ports_lock);
1231 INIT_LIST_HEAD(&portdev->ports);
1232 + INIT_LIST_HEAD(&portdev->list);
1233
1234 virtio_device_ready(portdev->vdev);
1235
1236 @@ -2080,18 +2103,22 @@ static int virtcons_probe(struct virtio_device *vdev)
1237 INIT_WORK(&portdev->control_work, &control_work_handler);
1238
1239 if (multiport) {
1240 - unsigned int nr_added_bufs;
1241 -
1242 spin_lock_init(&portdev->c_ivq_lock);
1243 spin_lock_init(&portdev->c_ovq_lock);
1244
1245 - nr_added_bufs = fill_queue(portdev->c_ivq,
1246 - &portdev->c_ivq_lock);
1247 - if (!nr_added_bufs) {
1248 + err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
1249 + if (err < 0) {
1250 dev_err(&vdev->dev,
1251 "Error allocating buffers for control queue\n");
1252 - err = -ENOMEM;
1253 - goto free_vqs;
1254 + /*
1255 + * The host might want to notify mgmt sw about device
1256 + * add failure.
1257 + */
1258 + __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
1259 + VIRTIO_CONSOLE_DEVICE_READY, 0);
1260 + /* Device was functional: we need full cleanup. */
1261 + virtcons_remove(vdev);
1262 + return err;
1263 }
1264 } else {
1265 /*
1266 @@ -2122,11 +2149,6 @@ static int virtcons_probe(struct virtio_device *vdev)
1267
1268 return 0;
1269
1270 -free_vqs:
1271 - /* The host might want to notify mgmt sw about device add failure */
1272 - __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
1273 - VIRTIO_CONSOLE_DEVICE_READY, 0);
1274 - remove_vqs(portdev);
1275 free_chrdev:
1276 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
1277 free:
1278 @@ -2135,43 +2157,6 @@ fail:
1279 return err;
1280 }
1281
1282 -static void virtcons_remove(struct virtio_device *vdev)
1283 -{
1284 - struct ports_device *portdev;
1285 - struct port *port, *port2;
1286 -
1287 - portdev = vdev->priv;
1288 -
1289 - spin_lock_irq(&pdrvdata_lock);
1290 - list_del(&portdev->list);
1291 - spin_unlock_irq(&pdrvdata_lock);
1292 -
1293 - /* Disable interrupts for vqs */
1294 - vdev->config->reset(vdev);
1295 - /* Finish up work that's lined up */
1296 - if (use_multiport(portdev))
1297 - cancel_work_sync(&portdev->control_work);
1298 - else
1299 - cancel_work_sync(&portdev->config_work);
1300 -
1301 - list_for_each_entry_safe(port, port2, &portdev->ports, list)
1302 - unplug_port(port);
1303 -
1304 - unregister_chrdev(portdev->chr_major, "virtio-portsdev");
1305 -
1306 - /*
1307 - * When yanking out a device, we immediately lose the
1308 - * (device-side) queues. So there's no point in keeping the
1309 - * guest side around till we drop our final reference. This
1310 - * also means that any ports which are in an open state will
1311 - * have to just stop using the port, as the vqs are going
1312 - * away.
1313 - */
1314 - remove_controlq_data(portdev);
1315 - remove_vqs(portdev);
1316 - kfree(portdev);
1317 -}
1318 -
1319 static struct virtio_device_id id_table[] = {
1320 { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
1321 { 0 },
1322 @@ -2202,15 +2187,16 @@ static int virtcons_freeze(struct virtio_device *vdev)
1323
1324 vdev->config->reset(vdev);
1325
1326 - virtqueue_disable_cb(portdev->c_ivq);
1327 + if (use_multiport(portdev))
1328 + virtqueue_disable_cb(portdev->c_ivq);
1329 cancel_work_sync(&portdev->control_work);
1330 cancel_work_sync(&portdev->config_work);
1331 /*
1332 * Once more: if control_work_handler() was running, it would
1333 * enable the cb as the last step.
1334 */
1335 - virtqueue_disable_cb(portdev->c_ivq);
1336 - remove_controlq_data(portdev);
1337 + if (use_multiport(portdev))
1338 + virtqueue_disable_cb(portdev->c_ivq);
1339
1340 list_for_each_entry(port, &portdev->ports, list) {
1341 virtqueue_disable_cb(port->in_vq);
1342 diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
1343 index 9adaf48aea23..061a9f10218b 100644
1344 --- a/drivers/clk/mmp/clk-of-mmp2.c
1345 +++ b/drivers/clk/mmp/clk-of-mmp2.c
1346 @@ -227,8 +227,8 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
1347 /* The gate clocks has mux parent. */
1348 {MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
1349 {MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
1350 - {MMP2_CLK_SDH1, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
1351 - {MMP2_CLK_SDH1, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
1352 + {MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
1353 + {MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
1354 {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
1355 {MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock},
1356 {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock},
1357 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
1358 index d43cd983a7ec..063ce77df619 100644
1359 --- a/drivers/cpufreq/cpufreq.c
1360 +++ b/drivers/cpufreq/cpufreq.c
1361 @@ -875,6 +875,9 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1362 struct freq_attr *fattr = to_attr(attr);
1363 ssize_t ret;
1364
1365 + if (!fattr->show)
1366 + return -EIO;
1367 +
1368 down_read(&policy->rwsem);
1369 ret = fattr->show(policy, buf);
1370 up_read(&policy->rwsem);
1371 @@ -889,6 +892,9 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
1372 struct freq_attr *fattr = to_attr(attr);
1373 ssize_t ret = -EINVAL;
1374
1375 + if (!fattr->store)
1376 + return -EIO;
1377 +
1378 get_online_cpus();
1379
1380 if (cpu_online(policy->cpu)) {
1381 @@ -1646,6 +1652,9 @@ void cpufreq_resume(void)
1382 if (!cpufreq_driver)
1383 return;
1384
1385 + if (unlikely(!cpufreq_suspended))
1386 + return;
1387 +
1388 cpufreq_suspended = false;
1389
1390 if (!has_target() && !cpufreq_driver->resume)
1391 diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
1392 index c46387160976..98cdfc2ee0df 100644
1393 --- a/drivers/firmware/google/gsmi.c
1394 +++ b/drivers/firmware/google/gsmi.c
1395 @@ -480,11 +480,10 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
1396 if (count < sizeof(u32))
1397 return -EINVAL;
1398 param.type = *(u32 *)buf;
1399 - count -= sizeof(u32);
1400 buf += sizeof(u32);
1401
1402 /* The remaining buffer is the data payload */
1403 - if (count > gsmi_dev.data_buf->length)
1404 + if ((count - sizeof(u32)) > gsmi_dev.data_buf->length)
1405 return -EINVAL;
1406 param.data_len = count - sizeof(u32);
1407
1408 @@ -504,7 +503,7 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
1409
1410 spin_unlock_irqrestore(&gsmi_dev.lock, flags);
1411
1412 - return rc;
1413 + return (rc == 0) ? count : rc;
1414
1415 }
1416
1417 diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
1418 index 4fe0be5aa294..52cac7c600ee 100644
1419 --- a/drivers/gpio/gpio-max77620.c
1420 +++ b/drivers/gpio/gpio-max77620.c
1421 @@ -167,13 +167,13 @@ static int max77620_gpio_set_debounce(struct gpio_chip *gc,
1422 case 0:
1423 val = MAX77620_CNFG_GPIO_DBNC_None;
1424 break;
1425 - case 1000 ... 8000:
1426 + case 1 ... 8000:
1427 val = MAX77620_CNFG_GPIO_DBNC_8ms;
1428 break;
1429 - case 9000 ... 16000:
1430 + case 8001 ... 16000:
1431 val = MAX77620_CNFG_GPIO_DBNC_16ms;
1432 break;
1433 - case 17000 ... 32000:
1434 + case 16001 ... 32000:
1435 val = MAX77620_CNFG_GPIO_DBNC_32ms;
1436 break;
1437 default:
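
The max77620 fix above closes the gaps in the debounce lookup: any nonzero request up to 8000 microseconds now rounds up to the 8 ms hardware step, and the later ranges start right after the previous one instead of at 9000 and 17000, so values such as 500 or 8500 microseconds no longer fall through to the error case. A standalone sketch of the corrected rounding, returning milliseconds rather than the driver's register values (case ranges are the same GCC/Clang extension the driver uses):

    #include <stdio.h>

    static int debounce_ms(unsigned int usec)
    {
            switch (usec) {
            case 0:
                    return 0;
            case 1 ... 8000:
                    return 8;
            case 8001 ... 16000:
                    return 16;
            case 16001 ... 32000:
                    return 32;
            default:
                    return -1;      /* longer than the hardware supports */
            }
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   debounce_ms(500), debounce_ms(8500), debounce_ms(40000));
            return 0;
    }
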
1438 diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
1439 index 592f597d8951..8261afbbafb0 100644
1440 --- a/drivers/isdn/mISDN/tei.c
1441 +++ b/drivers/isdn/mISDN/tei.c
1442 @@ -1180,8 +1180,7 @@ static int
1443 ctrl_teimanager(struct manager *mgr, void *arg)
1444 {
1445 /* currently we only have one option */
1446 - int *val = (int *)arg;
1447 - int ret = 0;
1448 + unsigned int *val = (unsigned int *)arg;
1449
1450 switch (val[0]) {
1451 case IMCLEAR_L2:
1452 @@ -1197,9 +1196,9 @@ ctrl_teimanager(struct manager *mgr, void *arg)
1453 test_and_clear_bit(OPTION_L1_HOLD, &mgr->options);
1454 break;
1455 default:
1456 - ret = -EINVAL;
1457 + return -EINVAL;
1458 }
1459 - return ret;
1460 + return 0;
1461 }
1462
1463 /* This function does create a L2 for fixed TEI in NT Mode */
1464 diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
1465 index ad6223e88340..3d310dd60a0b 100644
1466 --- a/drivers/macintosh/windfarm_smu_sat.c
1467 +++ b/drivers/macintosh/windfarm_smu_sat.c
1468 @@ -22,14 +22,6 @@
1469
1470 #define VERSION "1.0"
1471
1472 -#define DEBUG
1473 -
1474 -#ifdef DEBUG
1475 -#define DBG(args...) printk(args)
1476 -#else
1477 -#define DBG(args...) do { } while(0)
1478 -#endif
1479 -
1480 /* If the cache is older than 800ms we'll refetch it */
1481 #define MAX_AGE msecs_to_jiffies(800)
1482
1483 @@ -106,13 +98,10 @@ struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id,
1484 buf[i+2] = data[3];
1485 buf[i+3] = data[2];
1486 }
1487 -#ifdef DEBUG
1488 - DBG(KERN_DEBUG "sat %d partition %x:", sat_id, id);
1489 - for (i = 0; i < len; ++i)
1490 - DBG(" %x", buf[i]);
1491 - DBG("\n");
1492 -#endif
1493
1494 + printk(KERN_DEBUG "sat %d partition %x:", sat_id, id);
1495 + print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET,
1496 + 16, 1, buf, len, false);
1497 if (size)
1498 *size = len;
1499 return (struct smu_sdbp_header *) buf;
1500 @@ -132,13 +121,13 @@ static int wf_sat_read_cache(struct wf_sat *sat)
1501 if (err < 0)
1502 return err;
1503 sat->last_read = jiffies;
1504 +
1505 #ifdef LOTSA_DEBUG
1506 {
1507 int i;
1508 - DBG(KERN_DEBUG "wf_sat_get: data is");
1509 - for (i = 0; i < 16; ++i)
1510 - DBG(" %.2x", sat->cache[i]);
1511 - DBG("\n");
1512 + printk(KERN_DEBUG "wf_sat_get: data is");
1513 + print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET,
1514 + 16, 1, sat->cache, 16, false);
1515 }
1516 #endif
1517 return 0;
1518 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
1519 index 2ffe7db75acb..36e6221fabab 100644
1520 --- a/drivers/md/dm.c
1521 +++ b/drivers/md/dm.c
1522 @@ -1946,9 +1946,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
1523 set_bit(DMF_FREEING, &md->flags);
1524 spin_unlock(&_minor_lock);
1525
1526 - spin_lock_irq(q->queue_lock);
1527 - queue_flag_set(QUEUE_FLAG_DYING, q);
1528 - spin_unlock_irq(q->queue_lock);
1529 + blk_set_queue_dying(q);
1530
1531 if (dm_request_based(md) && md->kworker_task)
1532 kthread_flush_worker(&md->kworker);
1533 diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
1534 index d300e5e7eadc..2ca9c928ed2f 100644
1535 --- a/drivers/media/platform/vivid/vivid-kthread-cap.c
1536 +++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
1537 @@ -777,7 +777,11 @@ static int vivid_thread_vid_cap(void *data)
1538 if (kthread_should_stop())
1539 break;
1540
1541 - mutex_lock(&dev->mutex);
1542 + if (!mutex_trylock(&dev->mutex)) {
1543 + schedule_timeout_uninterruptible(1);
1544 + continue;
1545 + }
1546 +
1547 cur_jiffies = jiffies;
1548 if (dev->cap_seq_resync) {
1549 dev->jiffies_vid_cap = cur_jiffies;
1550 @@ -930,8 +934,6 @@ void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
1551
1552 /* shutdown control thread */
1553 vivid_grab_controls(dev, false);
1554 - mutex_unlock(&dev->mutex);
1555 kthread_stop(dev->kthread_vid_cap);
1556 dev->kthread_vid_cap = NULL;
1557 - mutex_lock(&dev->mutex);
1558 }
1559 diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
1560 index 7c8d75852816..ed5d8fb854b4 100644
1561 --- a/drivers/media/platform/vivid/vivid-kthread-out.c
1562 +++ b/drivers/media/platform/vivid/vivid-kthread-out.c
1563 @@ -147,7 +147,11 @@ static int vivid_thread_vid_out(void *data)
1564 if (kthread_should_stop())
1565 break;
1566
1567 - mutex_lock(&dev->mutex);
1568 + if (!mutex_trylock(&dev->mutex)) {
1569 + schedule_timeout_uninterruptible(1);
1570 + continue;
1571 + }
1572 +
1573 cur_jiffies = jiffies;
1574 if (dev->out_seq_resync) {
1575 dev->jiffies_vid_out = cur_jiffies;
1576 @@ -301,8 +305,6 @@ void vivid_stop_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
1577
1578 /* shutdown control thread */
1579 vivid_grab_controls(dev, false);
1580 - mutex_unlock(&dev->mutex);
1581 kthread_stop(dev->kthread_vid_out);
1582 dev->kthread_vid_out = NULL;
1583 - mutex_lock(&dev->mutex);
1584 }
1585 diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
1586 index ebd7b9c4dd83..4f49c9a47d49 100644
1587 --- a/drivers/media/platform/vivid/vivid-sdr-cap.c
1588 +++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
1589 @@ -149,7 +149,11 @@ static int vivid_thread_sdr_cap(void *data)
1590 if (kthread_should_stop())
1591 break;
1592
1593 - mutex_lock(&dev->mutex);
1594 + if (!mutex_trylock(&dev->mutex)) {
1595 + schedule_timeout_uninterruptible(1);
1596 + continue;
1597 + }
1598 +
1599 cur_jiffies = jiffies;
1600 if (dev->sdr_cap_seq_resync) {
1601 dev->jiffies_sdr_cap = cur_jiffies;
1602 @@ -309,10 +313,8 @@ static void sdr_cap_stop_streaming(struct vb2_queue *vq)
1603 }
1604
1605 /* shutdown control thread */
1606 - mutex_unlock(&dev->mutex);
1607 kthread_stop(dev->kthread_sdr_cap);
1608 dev->kthread_sdr_cap = NULL;
1609 - mutex_lock(&dev->mutex);
1610 }
1611
1612 const struct vb2_ops vivid_sdr_cap_qops = {
1613 diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
1614 index a72982df4777..82621260fc34 100644
1615 --- a/drivers/media/platform/vivid/vivid-vid-cap.c
1616 +++ b/drivers/media/platform/vivid/vivid-vid-cap.c
1617 @@ -236,9 +236,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
1618 if (vb2_is_streaming(&dev->vb_vid_out_q))
1619 dev->can_loop_video = vivid_vid_can_loop(dev);
1620
1621 - if (dev->kthread_vid_cap)
1622 - return 0;
1623 -
1624 dev->vid_cap_seq_count = 0;
1625 dprintk(dev, 1, "%s\n", __func__);
1626 for (i = 0; i < VIDEO_MAX_FRAME; i++)
1627 diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
1628 index dd609eea4753..8fed2fbe91a9 100644
1629 --- a/drivers/media/platform/vivid/vivid-vid-out.c
1630 +++ b/drivers/media/platform/vivid/vivid-vid-out.c
1631 @@ -158,9 +158,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
1632 if (vb2_is_streaming(&dev->vb_vid_cap_q))
1633 dev->can_loop_video = vivid_vid_can_loop(dev);
1634
1635 - if (dev->kthread_vid_out)
1636 - return 0;
1637 -
1638 dev->vid_out_seq_count = 0;
1639 dprintk(dev, 1, "%s\n", __func__);
1640 if (dev->start_streaming_error) {
1641 diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
1642 index f072bf28fccd..0b386fd518cc 100644
1643 --- a/drivers/media/rc/imon.c
1644 +++ b/drivers/media/rc/imon.c
1645 @@ -1644,8 +1644,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
1646 spin_unlock_irqrestore(&ictx->kc_lock, flags);
1647
1648 /* send touchscreen events through input subsystem if touchpad data */
1649 - if (ictx->display_type == IMON_DISPLAY_TYPE_VGA && len == 8 &&
1650 - buf[7] == 0x86) {
1651 + if (ictx->touch && len == 8 && buf[7] == 0x86) {
1652 imon_touch_event(ictx, buf);
1653 return;
1654
1655 diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
1656 index 52bc42da8a4c..1fc3c8d7dd9b 100644
1657 --- a/drivers/media/usb/b2c2/flexcop-usb.c
1658 +++ b/drivers/media/usb/b2c2/flexcop-usb.c
1659 @@ -538,6 +538,9 @@ static int flexcop_usb_probe(struct usb_interface *intf,
1660 struct flexcop_device *fc = NULL;
1661 int ret;
1662
1663 + if (intf->cur_altsetting->desc.bNumEndpoints < 1)
1664 + return -ENODEV;
1665 +
1666 if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) {
1667 err("out of memory\n");
1668 return -ENOMEM;
1669 diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
1670 index b20f03d86e00..2b7a1b569db0 100644
1671 --- a/drivers/media/usb/dvb-usb/cxusb.c
1672 +++ b/drivers/media/usb/dvb-usb/cxusb.c
1673 @@ -437,7 +437,8 @@ static int cxusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
1674 u8 ircode[4];
1675 int i;
1676
1677 - cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4);
1678 + if (cxusb_ctrl_msg(d, CMD_GET_IR_CODE, NULL, 0, ircode, 4) < 0)
1679 + return 0;
1680
1681 *event = 0;
1682 *state = REMOTE_NO_KEY_PRESSED;
1683 diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
1684 index bfdf72355332..230f50aa3000 100644
1685 --- a/drivers/media/usb/usbvision/usbvision-video.c
1686 +++ b/drivers/media/usb/usbvision/usbvision-video.c
1687 @@ -332,6 +332,10 @@ static int usbvision_v4l2_open(struct file *file)
1688 if (mutex_lock_interruptible(&usbvision->v4l2_lock))
1689 return -ERESTARTSYS;
1690
1691 + if (usbvision->remove_pending) {
1692 + err_code = -ENODEV;
1693 + goto unlock;
1694 + }
1695 if (usbvision->user) {
1696 err_code = -EBUSY;
1697 } else {
1698 @@ -395,6 +399,7 @@ unlock:
1699 static int usbvision_v4l2_close(struct file *file)
1700 {
1701 struct usb_usbvision *usbvision = video_drvdata(file);
1702 + int r;
1703
1704 PDEBUG(DBG_IO, "close");
1705
1706 @@ -409,9 +414,10 @@ static int usbvision_v4l2_close(struct file *file)
1707 usbvision_scratch_free(usbvision);
1708
1709 usbvision->user--;
1710 + r = usbvision->remove_pending;
1711 mutex_unlock(&usbvision->v4l2_lock);
1712
1713 - if (usbvision->remove_pending) {
1714 + if (r) {
1715 printk(KERN_INFO "%s: Final disconnect\n", __func__);
1716 usbvision_release(usbvision);
1717 return 0;
1718 @@ -1095,6 +1101,11 @@ static int usbvision_radio_open(struct file *file)
1719
1720 if (mutex_lock_interruptible(&usbvision->v4l2_lock))
1721 return -ERESTARTSYS;
1722 +
1723 + if (usbvision->remove_pending) {
1724 + err_code = -ENODEV;
1725 + goto out;
1726 + }
1727 err_code = v4l2_fh_open(file);
1728 if (err_code)
1729 goto out;
1730 @@ -1127,6 +1138,7 @@ out:
1731 static int usbvision_radio_close(struct file *file)
1732 {
1733 struct usb_usbvision *usbvision = video_drvdata(file);
1734 + int r;
1735
1736 PDEBUG(DBG_IO, "");
1737
1738 @@ -1139,9 +1151,10 @@ static int usbvision_radio_close(struct file *file)
1739 usbvision_audio_off(usbvision);
1740 usbvision->radio = 0;
1741 usbvision->user--;
1742 + r = usbvision->remove_pending;
1743 mutex_unlock(&usbvision->v4l2_lock);
1744
1745 - if (usbvision->remove_pending) {
1746 + if (r) {
1747 printk(KERN_INFO "%s: Final disconnect\n", __func__);
1748 v4l2_fh_release(file);
1749 usbvision_release(usbvision);
1750 @@ -1568,6 +1581,7 @@ err_usb:
1751 static void usbvision_disconnect(struct usb_interface *intf)
1752 {
1753 struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf));
1754 + int u;
1755
1756 PDEBUG(DBG_PROBE, "");
1757
1758 @@ -1584,13 +1598,14 @@ static void usbvision_disconnect(struct usb_interface *intf)
1759 v4l2_device_disconnect(&usbvision->v4l2_dev);
1760 usbvision_i2c_unregister(usbvision);
1761 usbvision->remove_pending = 1; /* Now all ISO data will be ignored */
1762 + u = usbvision->user;
1763
1764 usb_put_dev(usbvision->dev);
1765 usbvision->dev = NULL; /* USB device is no more */
1766
1767 mutex_unlock(&usbvision->v4l2_lock);
1768
1769 - if (usbvision->user) {
1770 + if (u) {
1771 printk(KERN_INFO "%s: In use, disconnect pending\n",
1772 __func__);
1773 wake_up_interruptible(&usbvision->wait_frame);
1774 diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
1775 index a905d79381da..7c375b6dd318 100644
1776 --- a/drivers/media/usb/uvc/uvc_driver.c
1777 +++ b/drivers/media/usb/uvc/uvc_driver.c
1778 @@ -2021,6 +2021,21 @@ static int uvc_probe(struct usb_interface *intf,
1779 le16_to_cpu(udev->descriptor.idVendor),
1780 le16_to_cpu(udev->descriptor.idProduct));
1781
1782 + /* Initialize the media device. */
1783 +#ifdef CONFIG_MEDIA_CONTROLLER
1784 + dev->mdev.dev = &intf->dev;
1785 + strscpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
1786 + if (udev->serial)
1787 + strscpy(dev->mdev.serial, udev->serial,
1788 + sizeof(dev->mdev.serial));
1789 + usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info));
1790 + dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
1791 + dev->mdev.driver_version = LINUX_VERSION_CODE;
1792 + media_device_init(&dev->mdev);
1793 +
1794 + dev->vdev.mdev = &dev->mdev;
1795 +#endif
1796 +
1797 /* Parse the Video Class control descriptor. */
1798 if (uvc_parse_control(dev) < 0) {
1799 uvc_trace(UVC_TRACE_PROBE, "Unable to parse UVC "
1800 @@ -2041,20 +2056,7 @@ static int uvc_probe(struct usb_interface *intf,
1801 "linux-uvc-devel mailing list.\n");
1802 }
1803
1804 - /* Initialize the media device and register the V4L2 device. */
1805 -#ifdef CONFIG_MEDIA_CONTROLLER
1806 - dev->mdev.dev = &intf->dev;
1807 - strlcpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
1808 - if (udev->serial)
1809 - strlcpy(dev->mdev.serial, udev->serial,
1810 - sizeof(dev->mdev.serial));
1811 - strcpy(dev->mdev.bus_info, udev->devpath);
1812 - dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
1813 - dev->mdev.driver_version = LINUX_VERSION_CODE;
1814 - media_device_init(&dev->mdev);
1815 -
1816 - dev->vdev.mdev = &dev->mdev;
1817 -#endif
1818 + /* Register the V4L2 device. */
1819 if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
1820 goto error;
1821
1822 diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
1823 index 0556a9749dbe..1f0c2b594654 100644
1824 --- a/drivers/mfd/arizona-core.c
1825 +++ b/drivers/mfd/arizona-core.c
1826 @@ -52,8 +52,10 @@ int arizona_clk32k_enable(struct arizona *arizona)
1827 if (ret != 0)
1828 goto err_ref;
1829 ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK1]);
1830 - if (ret != 0)
1831 - goto err_pm;
1832 + if (ret != 0) {
1833 + pm_runtime_put_sync(arizona->dev);
1834 + goto err_ref;
1835 + }
1836 break;
1837 case ARIZONA_32KZ_MCLK2:
1838 ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK2]);
1839 @@ -67,8 +69,6 @@ int arizona_clk32k_enable(struct arizona *arizona)
1840 ARIZONA_CLK_32K_ENA);
1841 }
1842
1843 -err_pm:
1844 - pm_runtime_put_sync(arizona->dev);
1845 err_ref:
1846 if (ret != 0)
1847 arizona->clk32k_ref--;
1848 diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
1849 index 2d6e2c392786..4a2fc59d5901 100644
1850 --- a/drivers/mfd/max8997.c
1851 +++ b/drivers/mfd/max8997.c
1852 @@ -155,12 +155,6 @@ static struct max8997_platform_data *max8997_i2c_parse_dt_pdata(
1853
1854 pd->ono = irq_of_parse_and_map(dev->of_node, 1);
1855
1856 - /*
1857 - * ToDo: the 'wakeup' member in the platform data is more of a linux
1858 - * specfic information. Hence, there is no binding for that yet and
1859 - * not parsed here.
1860 - */
1861 -
1862 return pd;
1863 }
1864
1865 @@ -248,7 +242,7 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
1866 */
1867
1868 /* MAX8997 has a power button input. */
1869 - device_init_wakeup(max8997->dev, pdata->wakeup);
1870 + device_init_wakeup(max8997->dev, true);
1871
1872 return ret;
1873
1874 diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
1875 index 6c16f170529f..75d52034f89d 100644
1876 --- a/drivers/mfd/mc13xxx-core.c
1877 +++ b/drivers/mfd/mc13xxx-core.c
1878 @@ -278,7 +278,8 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
1879 if (ret)
1880 goto out;
1881
1882 - adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2;
1883 + adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 |
1884 + MC13XXX_ADC0_CHRGRAWDIV;
1885 adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC;
1886
1887 if (channel > 7)
1888 diff --git a/drivers/misc/mic/scif/scif_fence.c b/drivers/misc/mic/scif/scif_fence.c
1889 index cac3bcc308a7..7bb929f05d85 100644
1890 --- a/drivers/misc/mic/scif/scif_fence.c
1891 +++ b/drivers/misc/mic/scif/scif_fence.c
1892 @@ -272,7 +272,7 @@ static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
1893 dma_fail:
1894 if (!x100)
1895 dma_pool_free(ep->remote_dev->signal_pool, status,
1896 - status->src_dma_addr);
1897 + src - offsetof(struct scif_status, val));
1898 alloc_fail:
1899 return err;
1900 }
1901 diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
1902 index 6f9535e5e584..7fc6ce381142 100644
1903 --- a/drivers/mmc/host/mtk-sd.c
1904 +++ b/drivers/mmc/host/mtk-sd.c
1905 @@ -870,6 +870,7 @@ static void msdc_start_command(struct msdc_host *host,
1906 WARN_ON(host->cmd);
1907 host->cmd = cmd;
1908
1909 + mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
1910 if (!msdc_cmd_is_ready(host, mrq, cmd))
1911 return;
1912
1913 @@ -881,7 +882,6 @@ static void msdc_start_command(struct msdc_host *host,
1914
1915 cmd->error = 0;
1916 rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd);
1917 - mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
1918
1919 sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask);
1920 writel(cmd->arg, host->base + SDC_ARG);
1921 diff --git a/drivers/net/ethernet/amazon/Kconfig b/drivers/net/ethernet/amazon/Kconfig
1922 index 99b30353541a..9e87d7b8360f 100644
1923 --- a/drivers/net/ethernet/amazon/Kconfig
1924 +++ b/drivers/net/ethernet/amazon/Kconfig
1925 @@ -17,7 +17,7 @@ if NET_VENDOR_AMAZON
1926
1927 config ENA_ETHERNET
1928 tristate "Elastic Network Adapter (ENA) support"
1929 - depends on (PCI_MSI && X86)
1930 + depends on PCI_MSI && !CPU_BIG_ENDIAN
1931 ---help---
1932 This driver supports Elastic Network Adapter (ENA)"
1933
1934 diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1935 index 4a4782b3cc1b..a23404480597 100644
1936 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1937 +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
1938 @@ -1078,7 +1078,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
1939 break;
1940 }
1941
1942 - return 0;
1943 + return ret;
1944 }
1945
1946 static void bcmgenet_power_up(struct bcmgenet_priv *priv,
1947 diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
1948 index 9eb9b68f8935..ae1f963b6092 100644
1949 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c
1950 +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
1951 @@ -65,9 +65,15 @@
1952 *
1953 * The 40 bit 82580 SYSTIM overflows every
1954 * 2^40 * 10^-9 / 60 = 18.3 minutes.
1955 + *
1956 + * SYSTIM is converted to real time using a timecounter. As
1957 + * timecounter_cyc2time() allows old timestamps, the timecounter
1958 + * needs to be updated at least once per half of the SYSTIM interval.
1959 + * Scheduling of delayed work is not very accurate, so we aim for 8
1960 + * minutes to be sure the actual interval is shorter than 9.16 minutes.
1961 */
1962
1963 -#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
1964 +#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 8)
1965 #define IGB_PTP_TX_TIMEOUT (HZ * 15)
1966 #define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT)
1967 #define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
1968 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
1969 index 74d2db505866..6068e7c4fc7e 100644
1970 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
1971 +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
1972 @@ -1679,6 +1679,7 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1973 err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
1974 break;
1975 case ETHTOOL_GRXCLSRLALL:
1976 + cmd->data = MAX_NUM_OF_FS_RULES;
1977 while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
1978 err = mlx4_en_get_flow(dev, cmd, i);
1979 if (!err)
1980 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
1981 index d1a3a35ba87b..a10f042df01f 100644
1982 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
1983 +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
1984 @@ -1757,7 +1757,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
1985
1986 unlock:
1987 mutex_unlock(&esw->state_lock);
1988 - return 0;
1989 + return err;
1990 }
1991
1992 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
1993 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
1994 index 4b76c69fe86d..834208e55f7b 100644
1995 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
1996 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
1997 @@ -883,7 +883,7 @@ static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
1998 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1999
2000 if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
2001 - return 0;
2002 + return 1;
2003
2004 switch (capid) {
2005 case DCB_CAP_ATTR_PG:
2006 diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
2007 index 77a5364f7a10..04cbff7f1b23 100644
2008 --- a/drivers/net/ethernet/sfc/ptp.c
2009 +++ b/drivers/net/ethernet/sfc/ptp.c
2010 @@ -1320,7 +1320,8 @@ void efx_ptp_remove(struct efx_nic *efx)
2011 (void)efx_ptp_disable(efx);
2012
2013 cancel_work_sync(&efx->ptp_data->work);
2014 - cancel_work_sync(&efx->ptp_data->pps_work);
2015 + if (efx->ptp_data->pps_workwq)
2016 + cancel_work_sync(&efx->ptp_data->pps_work);
2017
2018 skb_queue_purge(&efx->ptp_data->rxq);
2019 skb_queue_purge(&efx->ptp_data->txq);
2020 diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
2021 index d7cb205fe7e2..892b06852e15 100644
2022 --- a/drivers/net/ethernet/ti/cpsw.c
2023 +++ b/drivers/net/ethernet/ti/cpsw.c
2024 @@ -590,6 +590,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
2025
2026 /* Clear all mcast from ALE */
2027 cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
2028 + __dev_mc_unsync(ndev, NULL);
2029
2030 /* Flood All Unicast Packets to Host port */
2031 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
2032 diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
2033 index da10104be16c..a48ed0873cc7 100644
2034 --- a/drivers/net/macsec.c
2035 +++ b/drivers/net/macsec.c
2036 @@ -2798,9 +2798,6 @@ static int macsec_dev_open(struct net_device *dev)
2037 struct net_device *real_dev = macsec->real_dev;
2038 int err;
2039
2040 - if (!(real_dev->flags & IFF_UP))
2041 - return -ENETDOWN;
2042 -
2043 err = dev_uc_add(real_dev, dev->dev_addr);
2044 if (err < 0)
2045 return err;
2046 @@ -3275,6 +3272,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
2047 if (err < 0)
2048 goto del_dev;
2049
2050 + netif_stacked_transfer_operstate(real_dev, dev);
2051 + linkwatch_fire_event(dev);
2052 +
2053 macsec_generation++;
2054
2055 return 0;
2056 @@ -3446,6 +3446,20 @@ static int macsec_notify(struct notifier_block *this, unsigned long event,
2057 return NOTIFY_DONE;
2058
2059 switch (event) {
2060 + case NETDEV_DOWN:
2061 + case NETDEV_UP:
2062 + case NETDEV_CHANGE: {
2063 + struct macsec_dev *m, *n;
2064 + struct macsec_rxh_data *rxd;
2065 +
2066 + rxd = macsec_data_rtnl(real_dev);
2067 + list_for_each_entry_safe(m, n, &rxd->secys, secys) {
2068 + struct net_device *dev = m->secy.netdev;
2069 +
2070 + netif_stacked_transfer_operstate(real_dev, dev);
2071 + }
2072 + break;
2073 + }
2074 case NETDEV_UNREGISTER: {
2075 struct macsec_dev *m, *n;
2076 struct macsec_rxh_data *rxd;
2077 diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
2078 index a9acf7156855..03009f1becdd 100644
2079 --- a/drivers/net/ntb_netdev.c
2080 +++ b/drivers/net/ntb_netdev.c
2081 @@ -236,7 +236,7 @@ static void ntb_netdev_tx_timer(unsigned long data)
2082 struct ntb_netdev *dev = netdev_priv(ndev);
2083
2084 if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
2085 - mod_timer(&dev->tx_timer, jiffies + msecs_to_jiffies(tx_time));
2086 + mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
2087 } else {
2088 /* Make sure anybody stopping the queue after this sees the new
2089 * value of ntb_transport_tx_free_entry()
2090 diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
2091 index b7bac14d1487..d84a362a084a 100644
2092 --- a/drivers/net/wireless/ath/ath10k/pci.c
2093 +++ b/drivers/net/wireless/ath/ath10k/pci.c
2094 @@ -1039,10 +1039,9 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
2095 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2096 int ret = 0;
2097 u32 *buf;
2098 - unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
2099 + unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
2100 struct ath10k_ce_pipe *ce_diag;
2101 void *data_buf = NULL;
2102 - u32 ce_data; /* Host buffer address in CE space */
2103 dma_addr_t ce_data_base = 0;
2104 int i;
2105
2106 @@ -1056,9 +1055,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
2107 * 1) 4-byte alignment
2108 * 2) Buffer in DMA-able space
2109 */
2110 - orig_nbytes = nbytes;
2111 + alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
2112 +
2113 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
2114 - orig_nbytes,
2115 + alloc_nbytes,
2116 &ce_data_base,
2117 GFP_ATOMIC);
2118 if (!data_buf) {
2119 @@ -1066,9 +1066,6 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
2120 goto done;
2121 }
2122
2123 - /* Copy caller's data to allocated DMA buf */
2124 - memcpy(data_buf, data, orig_nbytes);
2125 -
2126 /*
2127 * The address supplied by the caller is in the
2128 * Target CPU virtual address space.
2129 @@ -1081,12 +1078,14 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
2130 */
2131 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
2132
2133 - remaining_bytes = orig_nbytes;
2134 - ce_data = ce_data_base;
2135 + remaining_bytes = nbytes;
2136 while (remaining_bytes) {
2137 /* FIXME: check cast */
2138 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
2139
2140 + /* Copy caller's data to allocated DMA buf */
2141 + memcpy(data_buf, data, nbytes);
2142 +
2143 /* Set up to receive directly into Target(!) address */
2144 ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
2145 if (ret != 0)
2146 @@ -1096,7 +1095,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
2147 * Request CE to send caller-supplied data that
2148 * was copied to bounce buffer to Target(!) address.
2149 */
2150 - ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
2151 + ret = ath10k_ce_send_nolock(ce_diag, NULL, ce_data_base,
2152 nbytes, 0, 0);
2153 if (ret != 0)
2154 goto done;
2155 @@ -1137,12 +1136,12 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
2156
2157 remaining_bytes -= nbytes;
2158 address += nbytes;
2159 - ce_data += nbytes;
2160 + data += nbytes;
2161 }
2162
2163 done:
2164 if (data_buf) {
2165 - dma_free_coherent(ar->dev, orig_nbytes, data_buf,
2166 + dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
2167 ce_data_base);
2168 }
2169
2170 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
2171 index 08607d7fdb56..7eff6f8023d8 100644
2172 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
2173 +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
2174 @@ -4115,7 +4115,7 @@ static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
2175
2176 static void ar9003_hw_thermo_cal_apply(struct ath_hw *ah)
2177 {
2178 - u32 data, ko, kg;
2179 + u32 data = 0, ko, kg;
2180
2181 if (!AR_SREV_9462_20_OR_LATER(ah))
2182 return;
2183 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
2184 index 7c2a9a9bc372..b820e80d4b4c 100644
2185 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
2186 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
2187 @@ -502,6 +502,7 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
2188 }
2189
2190 spin_lock_bh(&wl->lock);
2191 + wl->wlc->vif = vif;
2192 wl->mute_tx = false;
2193 brcms_c_mute(wl->wlc, false);
2194 if (vif->type == NL80211_IFTYPE_STATION)
2195 @@ -519,6 +520,11 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
2196 static void
2197 brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
2198 {
2199 + struct brcms_info *wl = hw->priv;
2200 +
2201 + spin_lock_bh(&wl->lock);
2202 + wl->wlc->vif = NULL;
2203 + spin_unlock_bh(&wl->lock);
2204 }
2205
2206 static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
2207 @@ -840,8 +846,8 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
2208 status = brcms_c_aggregatable(wl->wlc, tid);
2209 spin_unlock_bh(&wl->lock);
2210 if (!status) {
2211 - brcms_err(wl->wlc->hw->d11core,
2212 - "START: tid %d is not agg\'able\n", tid);
2213 + brcms_dbg_ht(wl->wlc->hw->d11core,
2214 + "START: tid %d is not agg\'able\n", tid);
2215 return -EINVAL;
2216 }
2217 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2218 @@ -937,6 +943,25 @@ static void brcms_ops_set_tsf(struct ieee80211_hw *hw,
2219 spin_unlock_bh(&wl->lock);
2220 }
2221
2222 +static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw,
2223 + struct ieee80211_sta *sta, bool set)
2224 +{
2225 + struct brcms_info *wl = hw->priv;
2226 + struct sk_buff *beacon = NULL;
2227 + u16 tim_offset = 0;
2228 +
2229 + spin_lock_bh(&wl->lock);
2230 + if (wl->wlc->vif)
2231 + beacon = ieee80211_beacon_get_tim(hw, wl->wlc->vif,
2232 + &tim_offset, NULL);
2233 + if (beacon)
2234 + brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset,
2235 + wl->wlc->vif->bss_conf.dtim_period);
2236 + spin_unlock_bh(&wl->lock);
2237 +
2238 + return 0;
2239 +}
2240 +
2241 static const struct ieee80211_ops brcms_ops = {
2242 .tx = brcms_ops_tx,
2243 .start = brcms_ops_start,
2244 @@ -955,6 +980,7 @@ static const struct ieee80211_ops brcms_ops = {
2245 .flush = brcms_ops_flush,
2246 .get_tsf = brcms_ops_get_tsf,
2247 .set_tsf = brcms_ops_set_tsf,
2248 + .set_tim = brcms_ops_beacon_set_tim,
2249 };
2250
2251 void brcms_dpc(unsigned long data)
2252 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
2253 index c4d135cff04a..9f76b880814e 100644
2254 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
2255 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.h
2256 @@ -563,6 +563,7 @@ struct brcms_c_info {
2257
2258 struct wiphy *wiphy;
2259 struct scb pri_scb;
2260 + struct ieee80211_vif *vif;
2261
2262 struct sk_buff *beacon;
2263 u16 beacon_tim_offset;
2264 diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
2265 index 69b826d229c5..04939e576ee0 100644
2266 --- a/drivers/net/wireless/cisco/airo.c
2267 +++ b/drivers/net/wireless/cisco/airo.c
2268 @@ -5472,7 +5472,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
2269 we have to add a spin lock... */
2270 rc = readBSSListRid(ai, doLoseSync, &BSSList_rid);
2271 while(rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) {
2272 - ptr += sprintf(ptr, "%pM %*s rssi = %d",
2273 + ptr += sprintf(ptr, "%pM %.*s rssi = %d",
2274 BSSList_rid.bssid,
2275 (int)BSSList_rid.ssidLen,
2276 BSSList_rid.ssid,
2277 diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
2278 index 46d0099fd6e8..94901b0041ce 100644
2279 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
2280 +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
2281 @@ -364,11 +364,20 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
2282 struct mwifiex_power_cfg power_cfg;
2283 int dbm = MBM_TO_DBM(mbm);
2284
2285 - if (type == NL80211_TX_POWER_FIXED) {
2286 + switch (type) {
2287 + case NL80211_TX_POWER_FIXED:
2288 power_cfg.is_power_auto = 0;
2289 + power_cfg.is_power_fixed = 1;
2290 power_cfg.power_level = dbm;
2291 - } else {
2292 + break;
2293 + case NL80211_TX_POWER_LIMITED:
2294 + power_cfg.is_power_auto = 0;
2295 + power_cfg.is_power_fixed = 0;
2296 + power_cfg.power_level = dbm;
2297 + break;
2298 + case NL80211_TX_POWER_AUTOMATIC:
2299 power_cfg.is_power_auto = 1;
2300 + break;
2301 }
2302
2303 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
2304 diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h
2305 index 536ab834b126..729a69f88a48 100644
2306 --- a/drivers/net/wireless/marvell/mwifiex/ioctl.h
2307 +++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h
2308 @@ -265,6 +265,7 @@ struct mwifiex_ds_encrypt_key {
2309
2310 struct mwifiex_power_cfg {
2311 u32 is_power_auto;
2312 + u32 is_power_fixed;
2313 u32 power_level;
2314 };
2315
2316 diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
2317 index 7f9645703d96..478885afb6c6 100644
2318 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
2319 +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
2320 @@ -728,6 +728,9 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
2321 txp_cfg = (struct host_cmd_ds_txpwr_cfg *) buf;
2322 txp_cfg->action = cpu_to_le16(HostCmd_ACT_GEN_SET);
2323 if (!power_cfg->is_power_auto) {
2324 + u16 dbm_min = power_cfg->is_power_fixed ?
2325 + dbm : priv->min_tx_power_level;
2326 +
2327 txp_cfg->mode = cpu_to_le32(1);
2328 pg_tlv = (struct mwifiex_types_power_group *)
2329 (buf + sizeof(struct host_cmd_ds_txpwr_cfg));
2330 @@ -742,7 +745,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
2331 pg->last_rate_code = 0x03;
2332 pg->modulation_class = MOD_CLASS_HR_DSSS;
2333 pg->power_step = 0;
2334 - pg->power_min = (s8) dbm;
2335 + pg->power_min = (s8) dbm_min;
2336 pg->power_max = (s8) dbm;
2337 pg++;
2338 /* Power group for modulation class OFDM */
2339 @@ -750,7 +753,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
2340 pg->last_rate_code = 0x07;
2341 pg->modulation_class = MOD_CLASS_OFDM;
2342 pg->power_step = 0;
2343 - pg->power_min = (s8) dbm;
2344 + pg->power_min = (s8) dbm_min;
2345 pg->power_max = (s8) dbm;
2346 pg++;
2347 /* Power group for modulation class HTBW20 */
2348 @@ -758,7 +761,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
2349 pg->last_rate_code = 0x20;
2350 pg->modulation_class = MOD_CLASS_HT;
2351 pg->power_step = 0;
2352 - pg->power_min = (s8) dbm;
2353 + pg->power_min = (s8) dbm_min;
2354 pg->power_max = (s8) dbm;
2355 pg->ht_bandwidth = HT_BW_20;
2356 pg++;
2357 @@ -767,7 +770,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
2358 pg->last_rate_code = 0x20;
2359 pg->modulation_class = MOD_CLASS_HT;
2360 pg->power_step = 0;
2361 - pg->power_min = (s8) dbm;
2362 + pg->power_min = (s8) dbm_min;
2363 pg->power_max = (s8) dbm;
2364 pg->ht_bandwidth = HT_BW_40;
2365 }
2366 diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
2367 index 4e725d165aa6..e78545d4add3 100644
2368 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
2369 +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
2370 @@ -5660,6 +5660,7 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2371 break;
2372 case WLAN_CIPHER_SUITE_TKIP:
2373 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2374 + break;
2375 default:
2376 return -EOPNOTSUPP;
2377 }
2378 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
2379 index 8de29cc3ced0..a24644f34e65 100644
2380 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
2381 +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
2382 @@ -234,7 +234,7 @@ static int _rtl92d_fw_init(struct ieee80211_hw *hw)
2383 rtl_read_byte(rtlpriv, FW_MAC1_READY));
2384 }
2385 RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
2386 - "Polling FW ready fail!! REG_MCUFWDL:0x%08ul\n",
2387 + "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n",
2388 rtl_read_dword(rtlpriv, REG_MCUFWDL));
2389 return -1;
2390 }
2391 diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
2392 index fd4e9ba176c9..332a3a5c1c90 100644
2393 --- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c
2394 +++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
2395 @@ -66,7 +66,7 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy,
2396 out:
2397 mutex_unlock(&wl->mutex);
2398
2399 - return 0;
2400 + return ret;
2401 }
2402
2403 static int
2404 diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
2405 index 073e4a478c89..3cd995de1bbb 100644
2406 --- a/drivers/nfc/port100.c
2407 +++ b/drivers/nfc/port100.c
2408 @@ -791,7 +791,7 @@ static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out,
2409
2410 rc = port100_submit_urb_for_ack(dev, GFP_KERNEL);
2411 if (rc)
2412 - usb_unlink_urb(dev->out_urb);
2413 + usb_kill_urb(dev->out_urb);
2414
2415 exit:
2416 mutex_unlock(&dev->out_urb_lock);
2417 diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
2418 index 7310a261c858..e175cbeba266 100644
2419 --- a/drivers/ntb/hw/intel/ntb_hw_intel.c
2420 +++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
2421 @@ -330,7 +330,7 @@ static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
2422 return 0;
2423 }
2424
2425 -static inline int ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
2426 +static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
2427 {
2428 u64 shift, mask;
2429
2430 diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
2431 index eac0a1238e9d..c690299d5c4a 100644
2432 --- a/drivers/pci/host/pci-keystone.c
2433 +++ b/drivers/pci/host/pci-keystone.c
2434 @@ -43,6 +43,7 @@
2435 #define PCIE_RC_K2HK 0xb008
2436 #define PCIE_RC_K2E 0xb009
2437 #define PCIE_RC_K2L 0xb00a
2438 +#define PCIE_RC_K2G 0xb00b
2439
2440 #define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)
2441
2442 @@ -57,6 +58,8 @@ static void quirk_limit_mrrs(struct pci_dev *dev)
2443 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
2444 { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
2445 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
2446 + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
2447 + .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
2448 { 0, },
2449 };
2450
2451 diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
2452 index e053f1fa5512..ab2a451f3156 100644
2453 --- a/drivers/pinctrl/pinctrl-lpc18xx.c
2454 +++ b/drivers/pinctrl/pinctrl-lpc18xx.c
2455 @@ -630,14 +630,8 @@ static const struct pinctrl_pin_desc lpc18xx_pins[] = {
2456 LPC18XX_PIN(i2c0_sda, PIN_I2C0_SDA),
2457 };
2458
2459 -/**
2460 - * enum lpc18xx_pin_config_param - possible pin configuration parameters
2461 - * @PIN_CONFIG_GPIO_PIN_INT: route gpio to the gpio pin interrupt
2462 - * controller.
2463 - */
2464 -enum lpc18xx_pin_config_param {
2465 - PIN_CONFIG_GPIO_PIN_INT = PIN_CONFIG_END + 1,
2466 -};
2467 +/* PIN_CONFIG_GPIO_PIN_INT: route gpio to the gpio pin interrupt controller */
2468 +#define PIN_CONFIG_GPIO_PIN_INT (PIN_CONFIG_END + 1)
2469
2470 static const struct pinconf_generic_params lpc18xx_params[] = {
2471 {"nxp,gpio-pin-interrupt", PIN_CONFIG_GPIO_PIN_INT, 0},
2472 diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
2473 index e0ecffcbe11f..f8b54cfc90c7 100644
2474 --- a/drivers/pinctrl/pinctrl-zynq.c
2475 +++ b/drivers/pinctrl/pinctrl-zynq.c
2476 @@ -967,15 +967,12 @@ enum zynq_io_standards {
2477 zynq_iostd_max
2478 };
2479
2480 -/**
2481 - * enum zynq_pin_config_param - possible pin configuration parameters
2482 - * @PIN_CONFIG_IOSTANDARD: if the pin can select an IO standard, the argument to
2483 +/*
2484 + * PIN_CONFIG_IOSTANDARD: if the pin can select an IO standard, the argument to
2485 * this parameter (on a custom format) tells the driver which alternative
2486 * IO standard to use.
2487 */
2488 -enum zynq_pin_config_param {
2489 - PIN_CONFIG_IOSTANDARD = PIN_CONFIG_END + 1,
2490 -};
2491 +#define PIN_CONFIG_IOSTANDARD (PIN_CONFIG_END + 1)
2492
2493 static const struct pinconf_generic_params zynq_dt_params[] = {
2494 {"io-standard", PIN_CONFIG_IOSTANDARD, zynq_iostd_lvcmos18},
2495 diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
2496 index 8093afd17aa4..69641c9e7d17 100644
2497 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
2498 +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
2499 @@ -790,10 +790,23 @@ static int pmic_gpio_probe(struct platform_device *pdev)
2500 return ret;
2501 }
2502
2503 - ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins);
2504 - if (ret) {
2505 - dev_err(dev, "failed to add pin range\n");
2506 - goto err_range;
2507 + /*
2508 + * For DeviceTree-supported systems, the gpio core checks the
2509 + * pinctrl's device node for the "gpio-ranges" property.
2510 + * If it is present, it takes care of adding the pin ranges
2511 + * for the driver. In this case the driver can skip ahead.
2512 + *
2513 + * In order to remain compatible with older, existing DeviceTree
2514 + * files which don't set the "gpio-ranges" property or systems that
2515 + * utilize ACPI the driver has to call gpiochip_add_pin_range().
2516 + */
2517 + if (!of_property_read_bool(dev->of_node, "gpio-ranges")) {
2518 + ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0,
2519 + npins);
2520 + if (ret) {
2521 + dev_err(dev, "failed to add pin range\n");
2522 + goto err_range;
2523 + }
2524 }
2525
2526 return 0;
2527 diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
2528 index 69ffbd7b76f7..0fd7e40b86a0 100644
2529 --- a/drivers/platform/x86/asus-nb-wmi.c
2530 +++ b/drivers/platform/x86/asus-nb-wmi.c
2531 @@ -78,10 +78,12 @@ static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str,
2532
2533 static struct quirk_entry quirk_asus_unknown = {
2534 .wapf = 0,
2535 + .wmi_backlight_set_devstate = true,
2536 };
2537
2538 static struct quirk_entry quirk_asus_q500a = {
2539 .i8042_filter = asus_q500a_i8042_filter,
2540 + .wmi_backlight_set_devstate = true,
2541 };
2542
2543 /*
2544 @@ -92,15 +94,18 @@ static struct quirk_entry quirk_asus_q500a = {
2545 static struct quirk_entry quirk_asus_x55u = {
2546 .wapf = 4,
2547 .wmi_backlight_power = true,
2548 + .wmi_backlight_set_devstate = true,
2549 .no_display_toggle = true,
2550 };
2551
2552 static struct quirk_entry quirk_asus_wapf4 = {
2553 .wapf = 4,
2554 + .wmi_backlight_set_devstate = true,
2555 };
2556
2557 static struct quirk_entry quirk_asus_x200ca = {
2558 .wapf = 2,
2559 + .wmi_backlight_set_devstate = true,
2560 };
2561
2562 static struct quirk_entry quirk_no_rfkill = {
2563 @@ -114,13 +119,16 @@ static struct quirk_entry quirk_no_rfkill_wapf4 = {
2564
2565 static struct quirk_entry quirk_asus_ux303ub = {
2566 .wmi_backlight_native = true,
2567 + .wmi_backlight_set_devstate = true,
2568 };
2569
2570 static struct quirk_entry quirk_asus_x550lb = {
2571 + .wmi_backlight_set_devstate = true,
2572 .xusb2pr = 0x01D9,
2573 };
2574
2575 -static struct quirk_entry quirk_asus_ux330uak = {
2576 +static struct quirk_entry quirk_asus_forceals = {
2577 + .wmi_backlight_set_devstate = true,
2578 .wmi_force_als_set = true,
2579 };
2580
2581 @@ -431,7 +439,7 @@ static const struct dmi_system_id asus_quirks[] = {
2582 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
2583 DMI_MATCH(DMI_PRODUCT_NAME, "UX330UAK"),
2584 },
2585 - .driver_data = &quirk_asus_ux330uak,
2586 + .driver_data = &quirk_asus_forceals,
2587 },
2588 {
2589 .callback = dmi_matched,
2590 @@ -442,6 +450,15 @@ static const struct dmi_system_id asus_quirks[] = {
2591 },
2592 .driver_data = &quirk_asus_x550lb,
2593 },
2594 + {
2595 + .callback = dmi_matched,
2596 + .ident = "ASUSTeK COMPUTER INC. UX430UQ",
2597 + .matches = {
2598 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
2599 + DMI_MATCH(DMI_PRODUCT_NAME, "UX430UQ"),
2600 + },
2601 + .driver_data = &quirk_asus_forceals,
2602 + },
2603 {},
2604 };
2605
2606 diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
2607 index 10bd13b30178..aede41a92cac 100644
2608 --- a/drivers/platform/x86/asus-wmi.c
2609 +++ b/drivers/platform/x86/asus-wmi.c
2610 @@ -2154,7 +2154,7 @@ static int asus_wmi_add(struct platform_device *pdev)
2611 err = asus_wmi_backlight_init(asus);
2612 if (err && err != -ENODEV)
2613 goto fail_backlight;
2614 - } else
2615 + } else if (asus->driver->quirks->wmi_backlight_set_devstate)
2616 err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
2617
2618 status = wmi_install_notify_handler(asus->driver->event_guid,
2619 diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
2620 index 5db052d1de1e..53bab79780e2 100644
2621 --- a/drivers/platform/x86/asus-wmi.h
2622 +++ b/drivers/platform/x86/asus-wmi.h
2623 @@ -45,6 +45,7 @@ struct quirk_entry {
2624 bool store_backlight_power;
2625 bool wmi_backlight_power;
2626 bool wmi_backlight_native;
2627 + bool wmi_backlight_set_devstate;
2628 bool wmi_force_als_set;
2629 int wapf;
2630 /*
2631 diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
2632 index 5dab4665ca3b..3e0eea3aa876 100644
2633 --- a/drivers/rtc/rtc-s35390a.c
2634 +++ b/drivers/rtc/rtc-s35390a.c
2635 @@ -106,7 +106,7 @@ static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len)
2636 */
2637 static int s35390a_reset(struct s35390a *s35390a, char *status1)
2638 {
2639 - char buf;
2640 + u8 buf;
2641 int ret;
2642 unsigned initcount = 0;
2643
2644 diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
2645 index 5ee7f44cf869..830b2d2dcf20 100644
2646 --- a/drivers/scsi/dc395x.c
2647 +++ b/drivers/scsi/dc395x.c
2648 @@ -1972,6 +1972,11 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
2649 xferred -= psge->length;
2650 } else {
2651 /* Partial SG entry done */
2652 + pci_dma_sync_single_for_cpu(srb->dcb->
2653 + acb->dev,
2654 + srb->sg_bus_addr,
2655 + SEGMENTX_LEN,
2656 + PCI_DMA_TODEVICE);
2657 psge->length -= xferred;
2658 psge->address += xferred;
2659 srb->sg_index = idx;
2660 @@ -3450,14 +3455,12 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
2661 }
2662 }
2663
2664 - if (dir != PCI_DMA_NONE && scsi_sg_count(cmd))
2665 - pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd),
2666 - scsi_sg_count(cmd), dir);
2667 -
2668 ckc_only = 0;
2669 /* Check Error Conditions */
2670 ckc_e:
2671
2672 + pci_unmap_srb(acb, srb);
2673 +
2674 if (cmd->cmnd[0] == INQUIRY) {
2675 unsigned char *base = NULL;
2676 struct ScsiInqData *ptr;
2677 @@ -3511,7 +3514,6 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
2678 cmd, cmd->result);
2679 srb_free_insert(acb, srb);
2680 }
2681 - pci_unmap_srb(acb, srb);
2682
2683 cmd->scsi_done(cmd);
2684 waiting_process_next(acb);
2685 diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
2686 index 02cb76fd4420..6bbf2945a3e0 100644
2687 --- a/drivers/scsi/ips.c
2688 +++ b/drivers/scsi/ips.c
2689 @@ -3500,6 +3500,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
2690
2691 case START_STOP:
2692 scb->scsi_cmd->result = DID_OK << 16;
2693 + break;
2694
2695 case TEST_UNIT_READY:
2696 case INQUIRY:
2697 diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
2698 index 609dafd661d1..da4583a2fa23 100644
2699 --- a/drivers/scsi/isci/host.c
2700 +++ b/drivers/scsi/isci/host.c
2701 @@ -2717,9 +2717,9 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
2702 * the task management request.
2703 * @task_request: the handle to the task request object to start.
2704 */
2705 -enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
2706 - struct isci_remote_device *idev,
2707 - struct isci_request *ireq)
2708 +enum sci_status sci_controller_start_task(struct isci_host *ihost,
2709 + struct isci_remote_device *idev,
2710 + struct isci_request *ireq)
2711 {
2712 enum sci_status status;
2713
2714 @@ -2728,7 +2728,7 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
2715 "%s: SCIC Controller starting task from invalid "
2716 "state\n",
2717 __func__);
2718 - return SCI_TASK_FAILURE_INVALID_STATE;
2719 + return SCI_FAILURE_INVALID_STATE;
2720 }
2721
2722 status = sci_remote_device_start_task(ihost, idev, ireq);
2723 diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
2724 index 22a9bb1abae1..15dc6e0d8deb 100644
2725 --- a/drivers/scsi/isci/host.h
2726 +++ b/drivers/scsi/isci/host.h
2727 @@ -490,7 +490,7 @@ enum sci_status sci_controller_start_io(
2728 struct isci_remote_device *idev,
2729 struct isci_request *ireq);
2730
2731 -enum sci_task_status sci_controller_start_task(
2732 +enum sci_status sci_controller_start_task(
2733 struct isci_host *ihost,
2734 struct isci_remote_device *idev,
2735 struct isci_request *ireq);
2736 diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
2737 index b709d2b20880..7d71ca421751 100644
2738 --- a/drivers/scsi/isci/request.c
2739 +++ b/drivers/scsi/isci/request.c
2740 @@ -1626,9 +1626,9 @@ static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
2741
2742 if (status == SCI_SUCCESS) {
2743 if (ireq->stp.rsp.status & ATA_ERR)
2744 - status = SCI_IO_FAILURE_RESPONSE_VALID;
2745 + status = SCI_FAILURE_IO_RESPONSE_VALID;
2746 } else {
2747 - status = SCI_IO_FAILURE_RESPONSE_VALID;
2748 + status = SCI_FAILURE_IO_RESPONSE_VALID;
2749 }
2750
2751 if (status != SCI_SUCCESS) {
2752 diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
2753 index 6dcaed0c1fc8..fb6eba331ac6 100644
2754 --- a/drivers/scsi/isci/task.c
2755 +++ b/drivers/scsi/isci/task.c
2756 @@ -258,7 +258,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
2757 struct isci_tmf *tmf, unsigned long timeout_ms)
2758 {
2759 DECLARE_COMPLETION_ONSTACK(completion);
2760 - enum sci_task_status status = SCI_TASK_FAILURE;
2761 + enum sci_status status = SCI_FAILURE;
2762 struct isci_request *ireq;
2763 int ret = TMF_RESP_FUNC_FAILED;
2764 unsigned long flags;
2765 @@ -301,7 +301,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
2766 /* start the TMF io. */
2767 status = sci_controller_start_task(ihost, idev, ireq);
2768
2769 - if (status != SCI_TASK_SUCCESS) {
2770 + if (status != SCI_SUCCESS) {
2771 dev_dbg(&ihost->pdev->dev,
2772 "%s: start_io failed - status = 0x%x, request = %p\n",
2773 __func__,
2774 diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
2775 index ace4f1f41b8e..d60564397be5 100644
2776 --- a/drivers/scsi/iscsi_tcp.c
2777 +++ b/drivers/scsi/iscsi_tcp.c
2778 @@ -798,7 +798,8 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
2779 return rc;
2780
2781 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
2782 - &addr, param, buf);
2783 + &addr,
2784 + (enum iscsi_param)param, buf);
2785 default:
2786 return iscsi_host_get_param(shost, param, buf);
2787 }
2788 diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
2789 index b5be4df05733..3702497b5b16 100644
2790 --- a/drivers/scsi/lpfc/lpfc_els.c
2791 +++ b/drivers/scsi/lpfc/lpfc_els.c
2792 @@ -1141,6 +1141,7 @@ stop_rr_fcf_flogi:
2793 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
2794 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
2795 spin_unlock_irq(&phba->hbalock);
2796 + phba->fcf.fcf_redisc_attempted = 0; /* reset */
2797 goto out;
2798 }
2799 if (!rc) {
2800 @@ -1155,6 +1156,7 @@ stop_rr_fcf_flogi:
2801 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
2802 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
2803 spin_unlock_irq(&phba->hbalock);
2804 + phba->fcf.fcf_redisc_attempted = 0; /* reset */
2805 goto out;
2806 }
2807 }
2808 diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
2809 index 9cca5ddbc50c..6eaba1676846 100644
2810 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c
2811 +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
2812 @@ -1969,6 +1969,26 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
2813 "failover and change port state:x%x/x%x\n",
2814 phba->pport->port_state, LPFC_VPORT_UNKNOWN);
2815 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2816 +
2817 + if (!phba->fcf.fcf_redisc_attempted) {
2818 + lpfc_unregister_fcf(phba);
2819 +
2820 + rc = lpfc_sli4_redisc_fcf_table(phba);
2821 + if (!rc) {
2822 + lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2823 + "3195 Rediscover FCF table\n");
2824 + phba->fcf.fcf_redisc_attempted = 1;
2825 + lpfc_sli4_clear_fcf_rr_bmask(phba);
2826 + } else {
2827 + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2828 + "3196 Rediscover FCF table "
2829 + "failed. Status:x%x\n", rc);
2830 + }
2831 + } else {
2832 + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2833 + "3197 Already rediscover FCF table "
2834 + "attempted. No more retry\n");
2835 + }
2836 goto stop_flogi_current_fcf;
2837 } else {
2838 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
2839 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
2840 index e9ea8f4ea2c9..2f80b2c0409e 100644
2841 --- a/drivers/scsi/lpfc/lpfc_init.c
2842 +++ b/drivers/scsi/lpfc/lpfc_init.c
2843 @@ -4444,7 +4444,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
2844 break;
2845 }
2846 /* If fast FCF failover rescan event is pending, do nothing */
2847 - if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
2848 + if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
2849 spin_unlock_irq(&phba->hbalock);
2850 break;
2851 }
2852 diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
2853 index c05fc61a383b..e1e0feb25003 100644
2854 --- a/drivers/scsi/lpfc/lpfc_sli.c
2855 +++ b/drivers/scsi/lpfc/lpfc_sli.c
2856 @@ -16553,15 +16553,8 @@ next_priority:
2857 goto initial_priority;
2858 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2859 "2844 No roundrobin failover FCF available\n");
2860 - if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
2861 - return LPFC_FCOE_FCF_NEXT_NONE;
2862 - else {
2863 - lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2864 - "3063 Only FCF available idx %d, flag %x\n",
2865 - next_fcf_index,
2866 - phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
2867 - return next_fcf_index;
2868 - }
2869 +
2870 + return LPFC_FCOE_FCF_NEXT_NONE;
2871 }
2872
2873 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
2874 diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
2875 index 0b88b5703e0f..9c69c4215de3 100644
2876 --- a/drivers/scsi/lpfc/lpfc_sli4.h
2877 +++ b/drivers/scsi/lpfc/lpfc_sli4.h
2878 @@ -237,6 +237,7 @@ struct lpfc_fcf {
2879 #define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
2880 #define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
2881 #define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
2882 + uint16_t fcf_redisc_attempted;
2883 uint32_t addr_mode;
2884 uint32_t eligible_fcf_cnt;
2885 struct lpfc_fcf_rec current_rec;
2886 diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
2887 index d90693b2767f..c5cc002dfdd5 100644
2888 --- a/drivers/scsi/megaraid/megaraid_sas_base.c
2889 +++ b/drivers/scsi/megaraid/megaraid_sas_base.c
2890 @@ -3694,12 +3694,12 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
2891 /*
2892 * The cur_state should not last for more than max_wait secs
2893 */
2894 - for (i = 0; i < (max_wait * 1000); i++) {
2895 + for (i = 0; i < max_wait; i++) {
2896 curr_abs_state = instance->instancet->
2897 read_fw_status_reg(instance->reg_set);
2898
2899 if (abs_state == curr_abs_state) {
2900 - msleep(1);
2901 + msleep(1000);
2902 } else
2903 break;
2904 }
2905 diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
2906 index cebfd734fd76..a9fef0cd382b 100644
2907 --- a/drivers/scsi/mpt3sas/mpt3sas_config.c
2908 +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
2909 @@ -674,10 +674,6 @@ mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
2910 r = _config_request(ioc, &mpi_request, mpi_reply,
2911 MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
2912 sizeof(*config_page));
2913 - mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
2914 - r = _config_request(ioc, &mpi_request, mpi_reply,
2915 - MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
2916 - sizeof(*config_page));
2917 out:
2918 return r;
2919 }
2920 diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2921 index ec48c010a3ba..aa2078d7e23e 100644
2922 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2923 +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2924 @@ -3297,6 +3297,40 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
2925 return _scsih_check_for_pending_tm(ioc, smid);
2926 }
2927
2928 +/** _scsih_allow_scmd_to_device - check whether scmd needs to
2929 + * issue to IOC or not.
2930 + * @ioc: per adapter object
2931 + * @scmd: pointer to scsi command object
2932 + *
2933 + * Returns true if scmd can be issued to IOC otherwise returns false.
2934 + */
2935 +inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
2936 + struct scsi_cmnd *scmd)
2937 +{
2938 +
2939 + if (ioc->pci_error_recovery)
2940 + return false;
2941 +
2942 + if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
2943 + if (ioc->remove_host)
2944 + return false;
2945 +
2946 + return true;
2947 + }
2948 +
2949 + if (ioc->remove_host) {
2950 +
2951 + switch (scmd->cmnd[0]) {
2952 + case SYNCHRONIZE_CACHE:
2953 + case START_STOP:
2954 + return true;
2955 + default:
2956 + return false;
2957 + }
2958 + }
2959 +
2960 + return true;
2961 +}
2962
2963 /**
2964 * _scsih_sas_control_complete - completion routine
2965 @@ -4059,7 +4093,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
2966 return 0;
2967 }
2968
2969 - if (ioc->pci_error_recovery || ioc->remove_host) {
2970 + if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
2971 scmd->result = DID_NO_CONNECT << 16;
2972 scmd->scsi_done(scmd);
2973 return 0;
2974 diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
2975 index a47cf638460a..ccb6f98550da 100644
2976 --- a/drivers/spi/spi-omap2-mcspi.c
2977 +++ b/drivers/spi/spi-omap2-mcspi.c
2978 @@ -298,7 +298,7 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
2979 struct omap2_mcspi_cs *cs = spi->controller_state;
2980 struct omap2_mcspi *mcspi;
2981 unsigned int wcnt;
2982 - int max_fifo_depth, fifo_depth, bytes_per_word;
2983 + int max_fifo_depth, bytes_per_word;
2984 u32 chconf, xferlevel;
2985
2986 mcspi = spi_master_get_devdata(master);
2987 @@ -314,10 +314,6 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
2988 else
2989 max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
2990
2991 - fifo_depth = gcd(t->len, max_fifo_depth);
2992 - if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0)
2993 - goto disable_fifo;
2994 -
2995 wcnt = t->len / bytes_per_word;
2996 if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
2997 goto disable_fifo;
2998 @@ -325,16 +321,17 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
2999 xferlevel = wcnt << 16;
3000 if (t->rx_buf != NULL) {
3001 chconf |= OMAP2_MCSPI_CHCONF_FFER;
3002 - xferlevel |= (fifo_depth - 1) << 8;
3003 + xferlevel |= (bytes_per_word - 1) << 8;
3004 }
3005 +
3006 if (t->tx_buf != NULL) {
3007 chconf |= OMAP2_MCSPI_CHCONF_FFET;
3008 - xferlevel |= fifo_depth - 1;
3009 + xferlevel |= bytes_per_word - 1;
3010 }
3011
3012 mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
3013 mcspi_write_chconf0(spi, chconf);
3014 - mcspi->fifo_depth = fifo_depth;
3015 + mcspi->fifo_depth = max_fifo_depth;
3016
3017 return;
3018 }
3019 @@ -601,7 +598,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
3020 struct dma_slave_config cfg;
3021 enum dma_slave_buswidth width;
3022 unsigned es;
3023 - u32 burst;
3024 void __iomem *chstat_reg;
3025 void __iomem *irqstat_reg;
3026 int wait_res;
3027 @@ -623,22 +619,14 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
3028 }
3029
3030 count = xfer->len;
3031 - burst = 1;
3032 -
3033 - if (mcspi->fifo_depth > 0) {
3034 - if (count > mcspi->fifo_depth)
3035 - burst = mcspi->fifo_depth / es;
3036 - else
3037 - burst = count / es;
3038 - }
3039
3040 memset(&cfg, 0, sizeof(cfg));
3041 cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
3042 cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
3043 cfg.src_addr_width = width;
3044 cfg.dst_addr_width = width;
3045 - cfg.src_maxburst = burst;
3046 - cfg.dst_maxburst = burst;
3047 + cfg.src_maxburst = 1;
3048 + cfg.dst_maxburst = 1;
3049
3050 rx = xfer->rx_buf;
3051 tx = xfer->tx_buf;
3052 diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
3053 index 711ea523b325..8a69148a962a 100644
3054 --- a/drivers/spi/spi-sh-msiof.c
3055 +++ b/drivers/spi/spi-sh-msiof.c
3056 @@ -1198,8 +1198,8 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
3057
3058 i = platform_get_irq(pdev, 0);
3059 if (i < 0) {
3060 - dev_err(&pdev->dev, "cannot get platform IRQ\n");
3061 - ret = -ENOENT;
3062 + dev_err(&pdev->dev, "cannot get IRQ\n");
3063 + ret = i;
3064 goto err1;
3065 }
3066
3067 diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
3068 index 608403c7586b..f0572d6a5f63 100644
3069 --- a/drivers/staging/comedi/drivers/usbduxfast.c
3070 +++ b/drivers/staging/comedi/drivers/usbduxfast.c
3071 @@ -1,5 +1,5 @@
3072 /*
3073 - * Copyright (C) 2004-2014 Bernd Porr, mail@berndporr.me.uk
3074 + * Copyright (C) 2004-2019 Bernd Porr, mail@berndporr.me.uk
3075 *
3076 * This program is free software; you can redistribute it and/or modify
3077 * it under the terms of the GNU General Public License as published by
3078 @@ -17,7 +17,7 @@
3079 * Description: University of Stirling USB DAQ & INCITE Technology Limited
3080 * Devices: [ITL] USB-DUX-FAST (usbduxfast)
3081 * Author: Bernd Porr <mail@berndporr.me.uk>
3082 - * Updated: 10 Oct 2014
3083 + * Updated: 16 Nov 2019
3084 * Status: stable
3085 */
3086
3087 @@ -31,6 +31,7 @@
3088 *
3089 *
3090 * Revision history:
3091 + * 1.0: Fixed a rounding error in usbduxfast_ai_cmdtest
3092 * 0.9: Dropping the first data packet which seems to be from the last transfer.
3093 * Buffer overflows in the FX2 are handed over to comedi.
3094 * 0.92: Dropping now 4 packets. The quad buffer has to be emptied.
3095 @@ -359,6 +360,7 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
3096 struct comedi_cmd *cmd)
3097 {
3098 int err = 0;
3099 + int err2 = 0;
3100 unsigned int steps;
3101 unsigned int arg;
3102
3103 @@ -408,11 +410,16 @@ static int usbduxfast_ai_cmdtest(struct comedi_device *dev,
3104 */
3105 steps = (cmd->convert_arg * 30) / 1000;
3106 if (cmd->chanlist_len != 1)
3107 - err |= comedi_check_trigger_arg_min(&steps,
3108 - MIN_SAMPLING_PERIOD);
3109 - err |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD);
3110 - arg = (steps * 1000) / 30;
3111 - err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
3112 + err2 |= comedi_check_trigger_arg_min(&steps,
3113 + MIN_SAMPLING_PERIOD);
3114 + else
3115 + err2 |= comedi_check_trigger_arg_min(&steps, 1);
3116 + err2 |= comedi_check_trigger_arg_max(&steps, MAX_SAMPLING_PERIOD);
3117 + if (err2) {
3118 + err |= err2;
3119 + arg = (steps * 1000) / 30;
3120 + err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
3121 + }
3122
3123 if (cmd->stop_src == TRIG_COUNT)
3124 err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
3125 diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
3126 index 73e5fee6cf1d..83126e2dce36 100644
3127 --- a/drivers/thermal/rcar_thermal.c
3128 +++ b/drivers/thermal/rcar_thermal.c
3129 @@ -401,8 +401,8 @@ static irqreturn_t rcar_thermal_irq(int irq, void *data)
3130 rcar_thermal_for_each_priv(priv, common) {
3131 if (rcar_thermal_had_changed(priv, status)) {
3132 rcar_thermal_irq_disable(priv);
3133 - schedule_delayed_work(&priv->work,
3134 - msecs_to_jiffies(300));
3135 + queue_delayed_work(system_freezable_wq, &priv->work,
3136 + msecs_to_jiffies(300));
3137 }
3138 }
3139
3140 diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
3141 index 7aca2d4670e4..e645ee1cfd98 100644
3142 --- a/drivers/tty/synclink_gt.c
3143 +++ b/drivers/tty/synclink_gt.c
3144 @@ -1187,14 +1187,13 @@ static long slgt_compat_ioctl(struct tty_struct *tty,
3145 unsigned int cmd, unsigned long arg)
3146 {
3147 struct slgt_info *info = tty->driver_data;
3148 - int rc = -ENOIOCTLCMD;
3149 + int rc;
3150
3151 if (sanity_check(info, tty->name, "compat_ioctl"))
3152 return -ENODEV;
3153 DBGINFO(("%s compat_ioctl() cmd=%08X\n", info->device_name, cmd));
3154
3155 switch (cmd) {
3156 -
3157 case MGSL_IOCSPARAMS32:
3158 rc = set_params32(info, compat_ptr(arg));
3159 break;
3160 @@ -1214,18 +1213,11 @@ static long slgt_compat_ioctl(struct tty_struct *tty,
3161 case MGSL_IOCWAITGPIO:
3162 case MGSL_IOCGXSYNC:
3163 case MGSL_IOCGXCTRL:
3164 - case MGSL_IOCSTXIDLE:
3165 - case MGSL_IOCTXENABLE:
3166 - case MGSL_IOCRXENABLE:
3167 - case MGSL_IOCTXABORT:
3168 - case TIOCMIWAIT:
3169 - case MGSL_IOCSIF:
3170 - case MGSL_IOCSXSYNC:
3171 - case MGSL_IOCSXCTRL:
3172 - rc = ioctl(tty, cmd, arg);
3173 + rc = ioctl(tty, cmd, (unsigned long)compat_ptr(arg));
3174 break;
3175 + default:
3176 + rc = ioctl(tty, cmd, arg);
3177 }
3178 -
3179 DBGINFO(("%s compat_ioctl() cmd=%08X rc=%d\n", info->device_name, cmd, rc));
3180 return rc;
3181 }
3182 diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
3183 index b8092bcf89a2..32dc0d9f0519 100644
3184 --- a/drivers/usb/misc/appledisplay.c
3185 +++ b/drivers/usb/misc/appledisplay.c
3186 @@ -160,8 +160,11 @@ static int appledisplay_bl_update_status(struct backlight_device *bd)
3187 pdata->msgdata, 2,
3188 ACD_USB_TIMEOUT);
3189 mutex_unlock(&pdata->sysfslock);
3190 -
3191 - return retval;
3192 +
3193 + if (retval < 0)
3194 + return retval;
3195 + else
3196 + return 0;
3197 }
3198
3199 static int appledisplay_bl_get_brightness(struct backlight_device *bd)
3200 @@ -179,7 +182,12 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
3201 0,
3202 pdata->msgdata, 2,
3203 ACD_USB_TIMEOUT);
3204 - brightness = pdata->msgdata[1];
3205 + if (retval < 2) {
3206 + if (retval >= 0)
3207 + retval = -EMSGSIZE;
3208 + } else {
3209 + brightness = pdata->msgdata[1];
3210 + }
3211 mutex_unlock(&pdata->sysfslock);
3212
3213 if (retval < 0)
3214 @@ -321,6 +329,7 @@ error:
3215 if (pdata) {
3216 if (pdata->urb) {
3217 usb_kill_urb(pdata->urb);
3218 + cancel_delayed_work_sync(&pdata->work);
3219 if (pdata->urbdata)
3220 usb_free_coherent(pdata->udev, ACD_URB_BUFFER_LEN,
3221 pdata->urbdata, pdata->urb->transfer_dma);
3222 diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
3223 index 64f2eeffaa00..b694e200a98e 100644
3224 --- a/drivers/usb/misc/chaoskey.c
3225 +++ b/drivers/usb/misc/chaoskey.c
3226 @@ -412,13 +412,17 @@ static int _chaoskey_fill(struct chaoskey *dev)
3227 !dev->reading,
3228 (started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT) );
3229
3230 - if (result < 0)
3231 + if (result < 0) {
3232 + usb_kill_urb(dev->urb);
3233 goto out;
3234 + }
3235
3236 - if (result == 0)
3237 + if (result == 0) {
3238 result = -ETIMEDOUT;
3239 - else
3240 + usb_kill_urb(dev->urb);
3241 + } else {
3242 result = dev->valid;
3243 + }
3244 out:
3245 /* Let the device go back to sleep eventually */
3246 usb_autopm_put_interface(dev->interface);
3247 @@ -554,7 +558,21 @@ static int chaoskey_suspend(struct usb_interface *interface,
3248
3249 static int chaoskey_resume(struct usb_interface *interface)
3250 {
3251 + struct chaoskey *dev;
3252 + struct usb_device *udev = interface_to_usbdev(interface);
3253 +
3254 usb_dbg(interface, "resume");
3255 + dev = usb_get_intfdata(interface);
3256 +
3257 + /*
3258 + * We may have lost power.
3259 + * In that case the device that needs a long time
3260 + * for the first requests needs an extended timeout
3261 + * again
3262 + */
3263 + if (le16_to_cpu(udev->descriptor.idVendor) == ALEA_VENDOR_ID)
3264 + dev->reads_started = false;
3265 +
3266 return 0;
3267 }
3268 #else
3269 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
3270 index 40c58145bf80..613544d25fad 100644
3271 --- a/drivers/usb/serial/cp210x.c
3272 +++ b/drivers/usb/serial/cp210x.c
3273 @@ -122,6 +122,7 @@ static const struct usb_device_id id_table[] = {
3274 { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
3275 { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
3276 { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
3277 + { USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */
3278 { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
3279 { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
3280 { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
3281 diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
3282 index ea20322e1416..14b45f3e6388 100644
3283 --- a/drivers/usb/serial/mos7720.c
3284 +++ b/drivers/usb/serial/mos7720.c
3285 @@ -1941,10 +1941,6 @@ static int mos7720_startup(struct usb_serial *serial)
3286 }
3287 }
3288
3289 - /* setting configuration feature to one */
3290 - usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
3291 - (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
3292 -
3293 #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
3294 if (product == MOSCHIP_DEVICE_ID_7715) {
3295 ret_val = mos7715_parport_init(serial);
3296 diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
3297 index 03d63bad6be4..0c92252c9316 100644
3298 --- a/drivers/usb/serial/mos7840.c
3299 +++ b/drivers/usb/serial/mos7840.c
3300 @@ -131,11 +131,15 @@
3301 /* This driver also supports
3302 * ATEN UC2324 device using Moschip MCS7840
3303 * ATEN UC2322 device using Moschip MCS7820
3304 + * MOXA UPort 2210 device using Moschip MCS7820
3305 */
3306 #define USB_VENDOR_ID_ATENINTL 0x0557
3307 #define ATENINTL_DEVICE_ID_UC2324 0x2011
3308 #define ATENINTL_DEVICE_ID_UC2322 0x7820
3309
3310 +#define USB_VENDOR_ID_MOXA 0x110a
3311 +#define MOXA_DEVICE_ID_2210 0x2210
3312 +
3313 /* Interrupt Routine Defines */
3314
3315 #define SERIAL_IIR_RLS 0x06
3316 @@ -206,6 +210,7 @@ static const struct usb_device_id id_table[] = {
3317 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
3318 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
3319 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
3320 + {USB_DEVICE(USB_VENDOR_ID_MOXA, MOXA_DEVICE_ID_2210)},
3321 {} /* terminating entry */
3322 };
3323 MODULE_DEVICE_TABLE(usb, id_table);
3324 @@ -2089,6 +2094,7 @@ static int mos7840_probe(struct usb_serial *serial,
3325 const struct usb_device_id *id)
3326 {
3327 u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
3328 + u16 vid = le16_to_cpu(serial->dev->descriptor.idVendor);
3329 u8 *buf;
3330 int device_type;
3331
3332 @@ -2098,6 +2104,11 @@ static int mos7840_probe(struct usb_serial *serial,
3333 goto out;
3334 }
3335
3336 + if (vid == USB_VENDOR_ID_MOXA && product == MOXA_DEVICE_ID_2210) {
3337 + device_type = MOSCHIP_DEVICE_ID_7820;
3338 + goto out;
3339 + }
3340 +
3341 buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
3342 if (!buf)
3343 return -ENOMEM;
3344 @@ -2350,11 +2361,6 @@ out:
3345 goto error;
3346 } else
3347 dev_dbg(&port->dev, "ZLP_REG5 Writing success status%d\n", status);
3348 -
3349 - /* setting configuration feature to one */
3350 - usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
3351 - 0x03, 0x00, 0x01, 0x00, NULL, 0x00,
3352 - MOS_WDR_TIMEOUT);
3353 }
3354 return 0;
3355 error:
3356 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3357 index 00a6e62a68a8..084332a5855e 100644
3358 --- a/drivers/usb/serial/option.c
3359 +++ b/drivers/usb/serial/option.c
3360 @@ -200,6 +200,7 @@ static void option_instat_callback(struct urb *urb);
3361 #define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
3362
3363 #define DELL_PRODUCT_5821E 0x81d7
3364 +#define DELL_PRODUCT_5821E_ESIM 0x81e0
3365
3366 #define KYOCERA_VENDOR_ID 0x0c88
3367 #define KYOCERA_PRODUCT_KPC650 0x17da
3368 @@ -1043,6 +1044,8 @@ static const struct usb_device_id option_ids[] = {
3369 { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
3370 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E),
3371 .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
3372 + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
3373 + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
3374 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
3375 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
3376 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
3377 @@ -1987,6 +1990,10 @@ static const struct usb_device_id option_ids[] = {
3378 { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
3379 { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
3380 { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
3381 + { USB_DEVICE(0x0489, 0xe0b4), /* Foxconn T77W968 */
3382 + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
3383 + { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
3384 + .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
3385 { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
3386 .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
3387 { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
3388 diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
3389 index 2f09294c5946..e459cd7302e2 100644
3390 --- a/drivers/virtio/virtio_ring.c
3391 +++ b/drivers/virtio/virtio_ring.c
3392 @@ -427,7 +427,7 @@ unmap_release:
3393 kfree(desc);
3394
3395 END_USE(vq);
3396 - return -EIO;
3397 + return -ENOMEM;
3398 }
3399
3400 /**
3401 diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
3402 index 6af117af9780..731cf54f75c6 100644
3403 --- a/drivers/xen/balloon.c
3404 +++ b/drivers/xen/balloon.c
3405 @@ -358,7 +358,10 @@ static enum bp_state reserve_additional_memory(void)
3406 * callers drop the mutex before trying again.
3407 */
3408 mutex_unlock(&balloon_mutex);
3409 + /* add_memory_resource() requires the device_hotplug lock */
3410 + lock_device_hotplug();
3411 rc = add_memory_resource(nid, resource, memhp_auto_online);
3412 + unlock_device_hotplug();
3413 mutex_lock(&balloon_mutex);
3414
3415 if (rc) {
3416 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
3417 index 3df434eb1474..3faccbf35e9f 100644
3418 --- a/fs/btrfs/ctree.c
3419 +++ b/fs/btrfs/ctree.c
3420 @@ -2973,6 +2973,10 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
3421
3422 again:
3423 b = get_old_root(root, time_seq);
3424 + if (!b) {
3425 + ret = -EIO;
3426 + goto done;
3427 + }
3428 level = btrfs_header_level(b);
3429 p->locks[level] = BTRFS_READ_LOCK;
3430
3431 diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
3432 index 7fcddaaca8a5..049cff197d2a 100644
3433 --- a/fs/ceph/inode.c
3434 +++ b/fs/ceph/inode.c
3435 @@ -1630,7 +1630,6 @@ retry_lookup:
3436 if (IS_ERR(realdn)) {
3437 err = PTR_ERR(realdn);
3438 d_drop(dn);
3439 - dn = NULL;
3440 goto next_item;
3441 }
3442 dn = realdn;
3443 diff --git a/fs/dlm/member.c b/fs/dlm/member.c
3444 index 9c47f1c14a8b..a47ae99f7bcb 100644
3445 --- a/fs/dlm/member.c
3446 +++ b/fs/dlm/member.c
3447 @@ -683,7 +683,7 @@ int dlm_ls_start(struct dlm_ls *ls)
3448
3449 error = dlm_config_nodes(ls->ls_name, &nodes, &count);
3450 if (error < 0)
3451 - goto fail;
3452 + goto fail_rv;
3453
3454 spin_lock(&ls->ls_recover_lock);
3455
3456 @@ -715,8 +715,9 @@ int dlm_ls_start(struct dlm_ls *ls)
3457 return 0;
3458
3459 fail:
3460 - kfree(rv);
3461 kfree(nodes);
3462 + fail_rv:
3463 + kfree(rv);
3464 return error;
3465 }
3466
3467 diff --git a/fs/dlm/user.c b/fs/dlm/user.c
3468 index 9ac65914ab5b..57f2aacec97f 100644
3469 --- a/fs/dlm/user.c
3470 +++ b/fs/dlm/user.c
3471 @@ -700,7 +700,7 @@ static int copy_result_to_user(struct dlm_user_args *ua, int compat,
3472 result.version[0] = DLM_DEVICE_VERSION_MAJOR;
3473 result.version[1] = DLM_DEVICE_VERSION_MINOR;
3474 result.version[2] = DLM_DEVICE_VERSION_PATCH;
3475 - memcpy(&result.lksb, &ua->lksb, sizeof(struct dlm_lksb));
3476 + memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr));
3477 result.user_lksb = ua->user_lksb;
3478
3479 /* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
3480 diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
3481 index 9041805096e0..0206c8c20784 100644
3482 --- a/fs/f2fs/data.c
3483 +++ b/fs/f2fs/data.c
3484 @@ -1201,6 +1201,7 @@ int do_write_data_page(struct f2fs_io_info *fio)
3485 /* This page is already truncated */
3486 if (fio->old_blkaddr == NULL_ADDR) {
3487 ClearPageUptodate(page);
3488 + clear_cold_data(page);
3489 goto out_writepage;
3490 }
3491
3492 @@ -1337,8 +1338,10 @@ done:
3493 clear_cold_data(page);
3494 out:
3495 inode_dec_dirty_pages(inode);
3496 - if (err)
3497 + if (err) {
3498 ClearPageUptodate(page);
3499 + clear_cold_data(page);
3500 + }
3501
3502 if (wbc->for_reclaim) {
3503 f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
3504 @@ -1821,6 +1824,8 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
3505 inode_dec_dirty_pages(inode);
3506 }
3507
3508 + clear_cold_data(page);
3509 +
3510 /* This is atomic written page, keep Private */
3511 if (IS_ATOMIC_WRITTEN_PAGE(page))
3512 return;
3513 @@ -1839,6 +1844,7 @@ int f2fs_release_page(struct page *page, gfp_t wait)
3514 if (IS_ATOMIC_WRITTEN_PAGE(page))
3515 return 0;
3516
3517 + clear_cold_data(page);
3518 set_page_private(page, 0);
3519 ClearPagePrivate(page);
3520 return 1;
3521 diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
3522 index af719d93507e..b414892be08b 100644
3523 --- a/fs/f2fs/dir.c
3524 +++ b/fs/f2fs/dir.c
3525 @@ -772,6 +772,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
3526 clear_page_dirty_for_io(page);
3527 ClearPagePrivate(page);
3528 ClearPageUptodate(page);
3529 + clear_cold_data(page);
3530 inode_dec_dirty_pages(dir);
3531 }
3532 f2fs_put_page(page, 1);
3533 diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
3534 index 1d5a35213810..c4c84af1ec17 100644
3535 --- a/fs/f2fs/segment.c
3536 +++ b/fs/f2fs/segment.c
3537 @@ -227,8 +227,10 @@ static int __revoke_inmem_pages(struct inode *inode,
3538 }
3539 next:
3540 /* we don't need to invalidate this in the successful status */
3541 - if (drop || recover)
3542 + if (drop || recover) {
3543 ClearPageUptodate(page);
3544 + clear_cold_data(page);
3545 + }
3546 set_page_private(page, 0);
3547 ClearPagePrivate(page);
3548 f2fs_put_page(page, 1);
3549 diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
3550 index f77a38755aea..0a80f6636549 100644
3551 --- a/fs/gfs2/rgrp.c
3552 +++ b/fs/gfs2/rgrp.c
3553 @@ -630,7 +630,10 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
3554 RB_CLEAR_NODE(&rs->rs_node);
3555
3556 if (rs->rs_free) {
3557 - struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
3558 + u64 last_block = gfs2_rbm_to_block(&rs->rs_rbm) +
3559 + rs->rs_free - 1;
3560 + struct gfs2_rbm last_rbm = { .rgd = rs->rs_rbm.rgd, };
3561 + struct gfs2_bitmap *start, *last;
3562
3563 /* return reserved blocks to the rgrp */
3564 BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
3565 @@ -641,7 +644,13 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
3566 it will force the number to be recalculated later. */
3567 rgd->rd_extfail_pt += rs->rs_free;
3568 rs->rs_free = 0;
3569 - clear_bit(GBF_FULL, &bi->bi_flags);
3570 + if (gfs2_rbm_from_block(&last_rbm, last_block))
3571 + return;
3572 + start = rbm_bi(&rs->rs_rbm);
3573 + last = rbm_bi(&last_rbm);
3574 + do
3575 + clear_bit(GBF_FULL, &start->bi_flags);
3576 + while (start++ != last);
3577 }
3578 }
3579
3580 diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
3581 index 2e713673df42..85dab71bee74 100644
3582 --- a/fs/hfs/brec.c
3583 +++ b/fs/hfs/brec.c
3584 @@ -444,6 +444,7 @@ skip:
3585 /* restore search_key */
3586 hfs_bnode_read_key(node, fd->search_key, 14);
3587 }
3588 + new_node = NULL;
3589 }
3590
3591 if (!rec && node->parent)
3592 diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
3593 index 320f4372f172..77eff447d301 100644
3594 --- a/fs/hfs/btree.c
3595 +++ b/fs/hfs/btree.c
3596 @@ -219,25 +219,17 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
3597 return node;
3598 }
3599
3600 -struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
3601 +/* Make sure @tree has enough space for the @rsvd_nodes */
3602 +int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
3603 {
3604 - struct hfs_bnode *node, *next_node;
3605 - struct page **pagep;
3606 - u32 nidx, idx;
3607 - unsigned off;
3608 - u16 off16;
3609 - u16 len;
3610 - u8 *data, byte, m;
3611 - int i;
3612 -
3613 - while (!tree->free_nodes) {
3614 - struct inode *inode = tree->inode;
3615 - u32 count;
3616 - int res;
3617 + struct inode *inode = tree->inode;
3618 + u32 count;
3619 + int res;
3620
3621 + while (tree->free_nodes < rsvd_nodes) {
3622 res = hfs_extend_file(inode);
3623 if (res)
3624 - return ERR_PTR(res);
3625 + return res;
3626 HFS_I(inode)->phys_size = inode->i_size =
3627 (loff_t)HFS_I(inode)->alloc_blocks *
3628 HFS_SB(tree->sb)->alloc_blksz;
3629 @@ -245,9 +237,26 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
3630 tree->sb->s_blocksize_bits;
3631 inode_set_bytes(inode, inode->i_size);
3632 count = inode->i_size >> tree->node_size_shift;
3633 - tree->free_nodes = count - tree->node_count;
3634 + tree->free_nodes += count - tree->node_count;
3635 tree->node_count = count;
3636 }
3637 + return 0;
3638 +}
3639 +
3640 +struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
3641 +{
3642 + struct hfs_bnode *node, *next_node;
3643 + struct page **pagep;
3644 + u32 nidx, idx;
3645 + unsigned off;
3646 + u16 off16;
3647 + u16 len;
3648 + u8 *data, byte, m;
3649 + int i, res;
3650 +
3651 + res = hfs_bmap_reserve(tree, 1);
3652 + if (res)
3653 + return ERR_PTR(res);
3654
3655 nidx = 0;
3656 node = hfs_bnode_find(tree, nidx);
3657 diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
3658 index f6bd266d70b5..2715f416b5a8 100644
3659 --- a/fs/hfs/btree.h
3660 +++ b/fs/hfs/btree.h
3661 @@ -81,6 +81,7 @@ struct hfs_find_data {
3662 extern struct hfs_btree *hfs_btree_open(struct super_block *, u32, btree_keycmp);
3663 extern void hfs_btree_close(struct hfs_btree *);
3664 extern void hfs_btree_write(struct hfs_btree *);
3665 +extern int hfs_bmap_reserve(struct hfs_btree *, int);
3666 extern struct hfs_bnode * hfs_bmap_alloc(struct hfs_btree *);
3667 extern void hfs_bmap_free(struct hfs_bnode *node);
3668
3669 diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
3670 index 8a66405b0f8b..d365bf0b8c77 100644
3671 --- a/fs/hfs/catalog.c
3672 +++ b/fs/hfs/catalog.c
3673 @@ -97,6 +97,14 @@ int hfs_cat_create(u32 cnid, struct inode *dir, const struct qstr *str, struct i
3674 if (err)
3675 return err;
3676
3677 + /*
3678 + * Fail early and avoid ENOSPC during the btree operations. We may
3679 + * have to split the root node at most once.
3680 + */
3681 + err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth);
3682 + if (err)
3683 + goto err2;
3684 +
3685 hfs_cat_build_key(sb, fd.search_key, cnid, NULL);
3686 entry_size = hfs_cat_build_thread(sb, &entry, S_ISDIR(inode->i_mode) ?
3687 HFS_CDR_THD : HFS_CDR_FTH,
3688 @@ -295,6 +303,14 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
3689 return err;
3690 dst_fd = src_fd;
3691
3692 + /*
3693 + * Fail early and avoid ENOSPC during the btree operations. We may
3694 + * have to split the root node at most once.
3695 + */
3696 + err = hfs_bmap_reserve(src_fd.tree, 2 * src_fd.tree->depth);
3697 + if (err)
3698 + goto out;
3699 +
3700 /* find the old dir entry and read the data */
3701 hfs_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
3702 err = hfs_brec_find(&src_fd);
3703 diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c
3704 index e33a0d36a93e..cbe4fca96378 100644
3705 --- a/fs/hfs/extent.c
3706 +++ b/fs/hfs/extent.c
3707 @@ -117,6 +117,10 @@ static int __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
3708 if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) {
3709 if (res != -ENOENT)
3710 return res;
3711 + /* Fail early and avoid ENOSPC during the btree operation */
3712 + res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
3713 + if (res)
3714 + return res;
3715 hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec));
3716 HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
3717 } else {
3718 @@ -300,7 +304,7 @@ int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type)
3719 return 0;
3720
3721 blocks = 0;
3722 - for (i = 0; i < 3; extent++, i++)
3723 + for (i = 0; i < 3; i++)
3724 blocks += be16_to_cpu(extent[i].count);
3725
3726 res = hfs_free_extents(sb, extent, blocks, blocks);
3727 @@ -341,7 +345,9 @@ int hfs_get_block(struct inode *inode, sector_t block,
3728 ablock = (u32)block / HFS_SB(sb)->fs_div;
3729
3730 if (block >= HFS_I(inode)->fs_blocks) {
3731 - if (block > HFS_I(inode)->fs_blocks || !create)
3732 + if (!create)
3733 + return 0;
3734 + if (block > HFS_I(inode)->fs_blocks)
3735 return -EIO;
3736 if (ablock >= HFS_I(inode)->alloc_blocks) {
3737 res = hfs_extend_file(inode);
3738 diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
3739 index f776acf2378a..de0d6d4c46b6 100644
3740 --- a/fs/hfs/inode.c
3741 +++ b/fs/hfs/inode.c
3742 @@ -641,6 +641,8 @@ int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr)
3743
3744 truncate_setsize(inode, attr->ia_size);
3745 hfs_file_truncate(inode);
3746 + inode->i_atime = inode->i_mtime = inode->i_ctime =
3747 + current_time(inode);
3748 }
3749
3750 setattr_copy(inode, attr);
3751 diff --git a/fs/hfsplus/attributes.c b/fs/hfsplus/attributes.c
3752 index e5b221de7de6..d7455ea70287 100644
3753 --- a/fs/hfsplus/attributes.c
3754 +++ b/fs/hfsplus/attributes.c
3755 @@ -216,6 +216,11 @@ int hfsplus_create_attr(struct inode *inode,
3756 if (err)
3757 goto failed_init_create_attr;
3758
3759 + /* Fail early and avoid ENOSPC during the btree operation */
3760 + err = hfs_bmap_reserve(fd.tree, fd.tree->depth + 1);
3761 + if (err)
3762 + goto failed_create_attr;
3763 +
3764 if (name) {
3765 err = hfsplus_attr_build_key(sb, fd.search_key,
3766 inode->i_ino, name);
3767 @@ -312,6 +317,11 @@ int hfsplus_delete_attr(struct inode *inode, const char *name)
3768 if (err)
3769 return err;
3770
3771 + /* Fail early and avoid ENOSPC during the btree operation */
3772 + err = hfs_bmap_reserve(fd.tree, fd.tree->depth);
3773 + if (err)
3774 + goto out;
3775 +
3776 if (name) {
3777 err = hfsplus_attr_build_key(sb, fd.search_key,
3778 inode->i_ino, name);
3779 diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
3780 index 1002a0c08319..20ce698251ad 100644
3781 --- a/fs/hfsplus/brec.c
3782 +++ b/fs/hfsplus/brec.c
3783 @@ -447,6 +447,7 @@ skip:
3784 /* restore search_key */
3785 hfs_bnode_read_key(node, fd->search_key, 14);
3786 }
3787 + new_node = NULL;
3788 }
3789
3790 if (!rec && node->parent)
3791 diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
3792 index 8d2256454efe..7e96b4c294f7 100644
3793 --- a/fs/hfsplus/btree.c
3794 +++ b/fs/hfsplus/btree.c
3795 @@ -341,26 +341,21 @@ static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
3796 return node;
3797 }
3798
3799 -struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
3800 +/* Make sure @tree has enough space for the @rsvd_nodes */
3801 +int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
3802 {
3803 - struct hfs_bnode *node, *next_node;
3804 - struct page **pagep;
3805 - u32 nidx, idx;
3806 - unsigned off;
3807 - u16 off16;
3808 - u16 len;
3809 - u8 *data, byte, m;
3810 - int i;
3811 + struct inode *inode = tree->inode;
3812 + struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
3813 + u32 count;
3814 + int res;
3815
3816 - while (!tree->free_nodes) {
3817 - struct inode *inode = tree->inode;
3818 - struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
3819 - u32 count;
3820 - int res;
3821 + if (rsvd_nodes <= 0)
3822 + return 0;
3823
3824 + while (tree->free_nodes < rsvd_nodes) {
3825 res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree));
3826 if (res)
3827 - return ERR_PTR(res);
3828 + return res;
3829 hip->phys_size = inode->i_size =
3830 (loff_t)hip->alloc_blocks <<
3831 HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
3832 @@ -368,9 +363,26 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
3833 hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
3834 inode_set_bytes(inode, inode->i_size);
3835 count = inode->i_size >> tree->node_size_shift;
3836 - tree->free_nodes = count - tree->node_count;
3837 + tree->free_nodes += count - tree->node_count;
3838 tree->node_count = count;
3839 }
3840 + return 0;
3841 +}
3842 +
3843 +struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
3844 +{
3845 + struct hfs_bnode *node, *next_node;
3846 + struct page **pagep;
3847 + u32 nidx, idx;
3848 + unsigned off;
3849 + u16 off16;
3850 + u16 len;
3851 + u8 *data, byte, m;
3852 + int i, res;
3853 +
3854 + res = hfs_bmap_reserve(tree, 1);
3855 + if (res)
3856 + return ERR_PTR(res);
3857
3858 nidx = 0;
3859 node = hfs_bnode_find(tree, nidx);
3860 diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
3861 index a5e00f7a4c14..947da72e72a3 100644
3862 --- a/fs/hfsplus/catalog.c
3863 +++ b/fs/hfsplus/catalog.c
3864 @@ -264,6 +264,14 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
3865 if (err)
3866 return err;
3867
3868 + /*
3869 + * Fail early and avoid ENOSPC during the btree operations. We may
3870 + * have to split the root node at most once.
3871 + */
3872 + err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth);
3873 + if (err)
3874 + goto err2;
3875 +
3876 hfsplus_cat_build_key_with_cnid(sb, fd.search_key, cnid);
3877 entry_size = hfsplus_fill_cat_thread(sb, &entry,
3878 S_ISDIR(inode->i_mode) ?
3879 @@ -332,6 +340,14 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, const struct qstr *str)
3880 if (err)
3881 return err;
3882
3883 + /*
3884 + * Fail early and avoid ENOSPC during the btree operations. We may
3885 + * have to split the root node at most once.
3886 + */
3887 + err = hfs_bmap_reserve(fd.tree, 2 * (int)fd.tree->depth - 2);
3888 + if (err)
3889 + goto out;
3890 +
3891 if (!str) {
3892 int len;
3893
3894 @@ -432,6 +448,14 @@ int hfsplus_rename_cat(u32 cnid,
3895 return err;
3896 dst_fd = src_fd;
3897
3898 + /*
3899 + * Fail early and avoid ENOSPC during the btree operations. We may
3900 + * have to split the root node at most twice.
3901 + */
3902 + err = hfs_bmap_reserve(src_fd.tree, 4 * (int)src_fd.tree->depth - 1);
3903 + if (err)
3904 + goto out;
3905 +
3906 /* find the old dir entry and read the data */
3907 err = hfsplus_cat_build_key(sb, src_fd.search_key,
3908 src_dir->i_ino, src_name);
3909 diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
3910 index feca524ce2a5..d93c051559cb 100644
3911 --- a/fs/hfsplus/extents.c
3912 +++ b/fs/hfsplus/extents.c
3913 @@ -99,6 +99,10 @@ static int __hfsplus_ext_write_extent(struct inode *inode,
3914 if (hip->extent_state & HFSPLUS_EXT_NEW) {
3915 if (res != -ENOENT)
3916 return res;
3917 + /* Fail early and avoid ENOSPC during the btree operation */
3918 + res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
3919 + if (res)
3920 + return res;
3921 hfs_brec_insert(fd, hip->cached_extents,
3922 sizeof(hfsplus_extent_rec));
3923 hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
3924 @@ -232,7 +236,9 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock,
3925 ablock = iblock >> sbi->fs_shift;
3926
3927 if (iblock >= hip->fs_blocks) {
3928 - if (iblock > hip->fs_blocks || !create)
3929 + if (!create)
3930 + return 0;
3931 + if (iblock > hip->fs_blocks)
3932 return -EIO;
3933 if (ablock >= hip->alloc_blocks) {
3934 res = hfsplus_file_extend(inode, false);
3935 diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
3936 index a3f03b247463..35cd703c6604 100644
3937 --- a/fs/hfsplus/hfsplus_fs.h
3938 +++ b/fs/hfsplus/hfsplus_fs.h
3939 @@ -311,6 +311,7 @@ static inline unsigned short hfsplus_min_io_size(struct super_block *sb)
3940 #define hfs_btree_open hfsplus_btree_open
3941 #define hfs_btree_close hfsplus_btree_close
3942 #define hfs_btree_write hfsplus_btree_write
3943 +#define hfs_bmap_reserve hfsplus_bmap_reserve
3944 #define hfs_bmap_alloc hfsplus_bmap_alloc
3945 #define hfs_bmap_free hfsplus_bmap_free
3946 #define hfs_bnode_read hfsplus_bnode_read
3947 @@ -395,6 +396,7 @@ u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size, u64 sectors,
3948 struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id);
3949 void hfs_btree_close(struct hfs_btree *tree);
3950 int hfs_btree_write(struct hfs_btree *tree);
3951 +int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes);
3952 struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree);
3953 void hfs_bmap_free(struct hfs_bnode *node);
3954
3955 diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
3956 index 2e796f8302ff..cfd380e2743d 100644
3957 --- a/fs/hfsplus/inode.c
3958 +++ b/fs/hfsplus/inode.c
3959 @@ -260,6 +260,7 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
3960 }
3961 truncate_setsize(inode, attr->ia_size);
3962 hfsplus_file_truncate(inode);
3963 + inode->i_mtime = inode->i_ctime = current_time(inode);
3964 }
3965
3966 setattr_copy(inode, attr);
3967 diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
3968 index 935bac253991..1403c88f2b05 100644
3969 --- a/fs/ocfs2/buffer_head_io.c
3970 +++ b/fs/ocfs2/buffer_head_io.c
3971 @@ -98,25 +98,34 @@ out:
3972 return ret;
3973 }
3974
3975 +/* Caller must provide a bhs[] with all NULL or non-NULL entries, so it
3976 + * will be easier to handle read failure.
3977 + */
3978 int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
3979 unsigned int nr, struct buffer_head *bhs[])
3980 {
3981 int status = 0;
3982 unsigned int i;
3983 struct buffer_head *bh;
3984 + int new_bh = 0;
3985
3986 trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);
3987
3988 if (!nr)
3989 goto bail;
3990
3991 + /* Don't put buffer head and re-assign it to NULL if it is allocated
3992 + * outside since the caller can't be aware of this alternation!
3993 + */
3994 + new_bh = (bhs[0] == NULL);
3995 +
3996 for (i = 0 ; i < nr ; i++) {
3997 if (bhs[i] == NULL) {
3998 bhs[i] = sb_getblk(osb->sb, block++);
3999 if (bhs[i] == NULL) {
4000 status = -ENOMEM;
4001 mlog_errno(status);
4002 - goto bail;
4003 + break;
4004 }
4005 }
4006 bh = bhs[i];
4007 @@ -156,9 +165,26 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
4008 submit_bh(REQ_OP_READ, 0, bh);
4009 }
4010
4011 +read_failure:
4012 for (i = nr; i > 0; i--) {
4013 bh = bhs[i - 1];
4014
4015 + if (unlikely(status)) {
4016 + if (new_bh && bh) {
4017 + /* If middle bh fails, let previous bh
4018 + * finish its read and then put it to
4019 + * avoid bh leak
4020 + */
4021 + if (!buffer_jbd(bh))
4022 + wait_on_buffer(bh);
4023 + put_bh(bh);
4024 + bhs[i - 1] = NULL;
4025 + } else if (bh && buffer_uptodate(bh)) {
4026 + clear_buffer_uptodate(bh);
4027 + }
4028 + continue;
4029 + }
4030 +
4031 /* No need to wait on the buffer if it's managed by JBD. */
4032 if (!buffer_jbd(bh))
4033 wait_on_buffer(bh);
4034 @@ -168,8 +194,7 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
4035 * so we can safely record this and loop back
4036 * to cleanup the other buffers. */
4037 status = -EIO;
4038 - put_bh(bh);
4039 - bhs[i - 1] = NULL;
4040 + goto read_failure;
4041 }
4042 }
4043
4044 @@ -177,6 +202,9 @@ bail:
4045 return status;
4046 }
4047
4048 +/* Caller must provide a bhs[] with all NULL or non-NULL entries, so it
4049 + * will be easier to handle read failure.
4050 + */
4051 int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
4052 struct buffer_head *bhs[], int flags,
4053 int (*validate)(struct super_block *sb,
4054 @@ -186,6 +214,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
4055 int i, ignore_cache = 0;
4056 struct buffer_head *bh;
4057 struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
4058 + int new_bh = 0;
4059
4060 trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);
4061
4062 @@ -211,6 +240,11 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
4063 goto bail;
4064 }
4065
4066 + /* Don't put buffer head and re-assign it to NULL if it is allocated
4067 + * outside since the caller can't be aware of this alternation!
4068 + */
4069 + new_bh = (bhs[0] == NULL);
4070 +
4071 ocfs2_metadata_cache_io_lock(ci);
4072 for (i = 0 ; i < nr ; i++) {
4073 if (bhs[i] == NULL) {
4074 @@ -219,7 +253,8 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
4075 ocfs2_metadata_cache_io_unlock(ci);
4076 status = -ENOMEM;
4077 mlog_errno(status);
4078 - goto bail;
4079 + /* Don't forget to put previous bh! */
4080 + break;
4081 }
4082 }
4083 bh = bhs[i];
4084 @@ -313,16 +348,27 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
4085 }
4086 }
4087
4088 - status = 0;
4089 -
4090 +read_failure:
4091 for (i = (nr - 1); i >= 0; i--) {
4092 bh = bhs[i];
4093
4094 if (!(flags & OCFS2_BH_READAHEAD)) {
4095 - if (status) {
4096 - /* Clear the rest of the buffers on error */
4097 - put_bh(bh);
4098 - bhs[i] = NULL;
4099 + if (unlikely(status)) {
4100 + /* Clear the buffers on error including those that
4101 + * ever succeeded in reading
4102 + */
4103 + if (new_bh && bh) {
4104 + /* If middle bh fails, let previous bh
4105 + * finish its read and then put it to
4106 + * avoid bh leak
4107 + */
4108 + if (!buffer_jbd(bh))
4109 + wait_on_buffer(bh);
4110 + put_bh(bh);
4111 + bhs[i] = NULL;
4112 + } else if (bh && buffer_uptodate(bh)) {
4113 + clear_buffer_uptodate(bh);
4114 + }
4115 continue;
4116 }
4117 /* We know this can't have changed as we hold the
4118 @@ -340,9 +386,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
4119 * uptodate. */
4120 status = -EIO;
4121 clear_buffer_needs_validate(bh);
4122 - put_bh(bh);
4123 - bhs[i] = NULL;
4124 - continue;
4125 + goto read_failure;
4126 }
4127
4128 if (buffer_needs_validate(bh)) {
4129 @@ -352,11 +396,8 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
4130 BUG_ON(buffer_jbd(bh));
4131 clear_buffer_needs_validate(bh);
4132 status = validate(sb, bh);
4133 - if (status) {
4134 - put_bh(bh);
4135 - bhs[i] = NULL;
4136 - continue;
4137 - }
4138 + if (status)
4139 + goto read_failure;
4140 }
4141 }
4142
4143 diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
4144 index e7b760deefae..32d60f69db24 100644
4145 --- a/fs/ocfs2/dlm/dlmdebug.c
4146 +++ b/fs/ocfs2/dlm/dlmdebug.c
4147 @@ -329,7 +329,7 @@ void dlm_print_one_mle(struct dlm_master_list_entry *mle)
4148 {
4149 char *buf;
4150
4151 - buf = (char *) get_zeroed_page(GFP_NOFS);
4152 + buf = (char *) get_zeroed_page(GFP_ATOMIC);
4153 if (buf) {
4154 dump_mle(mle, buf, PAGE_SIZE - 1);
4155 free_page((unsigned long)buf);
4156 diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
4157 index 5729d55da67d..2c3e975126b3 100644
4158 --- a/fs/ocfs2/dlmglue.c
4159 +++ b/fs/ocfs2/dlmglue.c
4160 @@ -3421,7 +3421,7 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
4161 * we can recover correctly from node failure. Otherwise, we may get
4162 * invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set.
4163 */
4164 - if (!ocfs2_is_o2cb_active() &&
4165 + if (ocfs2_userspace_stack(osb) &&
4166 lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
4167 lvb = 1;
4168
4169 diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
4170 index c179afd0051a..afaa044f5f6b 100644
4171 --- a/fs/ocfs2/move_extents.c
4172 +++ b/fs/ocfs2/move_extents.c
4173 @@ -25,6 +25,7 @@
4174 #include "ocfs2_ioctl.h"
4175
4176 #include "alloc.h"
4177 +#include "localalloc.h"
4178 #include "aops.h"
4179 #include "dlmglue.h"
4180 #include "extent_map.h"
4181 @@ -222,6 +223,7 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
4182 struct ocfs2_refcount_tree *ref_tree = NULL;
4183 u32 new_phys_cpos, new_len;
4184 u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
4185 + int need_free = 0;
4186
4187 if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) {
4188
4189 @@ -315,6 +317,7 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
4190 if (!partial) {
4191 context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;
4192 ret = -ENOSPC;
4193 + need_free = 1;
4194 goto out_commit;
4195 }
4196 }
4197 @@ -339,6 +342,20 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
4198 mlog_errno(ret);
4199
4200 out_commit:
4201 + if (need_free && context->data_ac) {
4202 + struct ocfs2_alloc_context *data_ac = context->data_ac;
4203 +
4204 + if (context->data_ac->ac_which == OCFS2_AC_USE_LOCAL)
4205 + ocfs2_free_local_alloc_bits(osb, handle, data_ac,
4206 + new_phys_cpos, new_len);
4207 + else
4208 + ocfs2_free_clusters(handle,
4209 + data_ac->ac_inode,
4210 + data_ac->ac_bh,
4211 + ocfs2_clusters_to_blocks(osb->sb, new_phys_cpos),
4212 + new_len);
4213 + }
4214 +
4215 ocfs2_commit_trans(osb, handle);
4216
4217 out_unlock_mutex:
4218 diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
4219 index 820359096c7a..52c07346bea3 100644
4220 --- a/fs/ocfs2/stackglue.c
4221 +++ b/fs/ocfs2/stackglue.c
4222 @@ -48,12 +48,6 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
4223 */
4224 static struct ocfs2_stack_plugin *active_stack;
4225
4226 -inline int ocfs2_is_o2cb_active(void)
4227 -{
4228 - return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB);
4229 -}
4230 -EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active);
4231 -
4232 static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
4233 {
4234 struct ocfs2_stack_plugin *p;
4235 diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
4236 index e3036e1790e8..f2dce10fae54 100644
4237 --- a/fs/ocfs2/stackglue.h
4238 +++ b/fs/ocfs2/stackglue.h
4239 @@ -298,9 +298,6 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p
4240 int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
4241 void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
4242
4243 -/* In ocfs2_downconvert_lock(), we need to know which stack we are using */
4244 -int ocfs2_is_o2cb_active(void);
4245 -
4246 extern struct kset *ocfs2_kset;
4247
4248 #endif /* STACKGLUE_H */
4249 diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
4250 index c387467d574c..e108c945ac1f 100644
4251 --- a/fs/ocfs2/xattr.c
4252 +++ b/fs/ocfs2/xattr.c
4253 @@ -1497,6 +1497,18 @@ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
4254 return loc->xl_ops->xlo_check_space(loc, xi);
4255 }
4256
4257 +static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
4258 +{
4259 + loc->xl_ops->xlo_add_entry(loc, name_hash);
4260 + loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
4261 + /*
4262 + * We can't leave the new entry's xe_name_offset at zero or
4263 + * add_namevalue() will go nuts. We set it to the size of our
4264 + * storage so that it can never be less than any other entry.
4265 + */
4266 + loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
4267 +}
4268 +
4269 static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
4270 struct ocfs2_xattr_info *xi)
4271 {
4272 @@ -2128,31 +2140,29 @@ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
4273 if (rc)
4274 goto out;
4275
4276 - if (!loc->xl_entry) {
4277 - rc = -EINVAL;
4278 - goto out;
4279 - }
4280 -
4281 - if (ocfs2_xa_can_reuse_entry(loc, xi)) {
4282 - orig_value_size = loc->xl_entry->xe_value_size;
4283 - rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
4284 - if (rc)
4285 - goto out;
4286 - goto alloc_value;
4287 - }
4288 + if (loc->xl_entry) {
4289 + if (ocfs2_xa_can_reuse_entry(loc, xi)) {
4290 + orig_value_size = loc->xl_entry->xe_value_size;
4291 + rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
4292 + if (rc)
4293 + goto out;
4294 + goto alloc_value;
4295 + }
4296
4297 - if (!ocfs2_xattr_is_local(loc->xl_entry)) {
4298 - orig_clusters = ocfs2_xa_value_clusters(loc);
4299 - rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
4300 - if (rc) {
4301 - mlog_errno(rc);
4302 - ocfs2_xa_cleanup_value_truncate(loc,
4303 - "overwriting",
4304 - orig_clusters);
4305 - goto out;
4306 + if (!ocfs2_xattr_is_local(loc->xl_entry)) {
4307 + orig_clusters = ocfs2_xa_value_clusters(loc);
4308 + rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
4309 + if (rc) {
4310 + mlog_errno(rc);
4311 + ocfs2_xa_cleanup_value_truncate(loc,
4312 + "overwriting",
4313 + orig_clusters);
4314 + goto out;
4315 + }
4316 }
4317 - }
4318 - ocfs2_xa_wipe_namevalue(loc);
4319 + ocfs2_xa_wipe_namevalue(loc);
4320 + } else
4321 + ocfs2_xa_add_entry(loc, name_hash);
4322
4323 /*
4324 * If we get here, we have a blank entry. Fill it. We grow our
4325 diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
4326 index 651755353374..0b58b9d419e8 100644
4327 --- a/fs/xfs/xfs_buf.c
4328 +++ b/fs/xfs/xfs_buf.c
4329 @@ -57,6 +57,32 @@ static kmem_zone_t *xfs_buf_zone;
4330 #define xb_to_gfp(flags) \
4331 ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
4332
4333 +/*
4334 + * Locking orders
4335 + *
4336 + * xfs_buf_ioacct_inc:
4337 + * xfs_buf_ioacct_dec:
4338 + * b_sema (caller holds)
4339 + * b_lock
4340 + *
4341 + * xfs_buf_stale:
4342 + * b_sema (caller holds)
4343 + * b_lock
4344 + * lru_lock
4345 + *
4346 + * xfs_buf_rele:
4347 + * b_lock
4348 + * pag_buf_lock
4349 + * lru_lock
4350 + *
4351 + * xfs_buftarg_wait_rele
4352 + * lru_lock
4353 + * b_lock (trylock due to inversion)
4354 + *
4355 + * xfs_buftarg_isolate
4356 + * lru_lock
4357 + * b_lock (trylock due to inversion)
4358 + */
4359
4360 static inline int
4361 xfs_buf_is_vmapped(
4362 @@ -957,8 +983,18 @@ xfs_buf_rele(
4363
4364 ASSERT(atomic_read(&bp->b_hold) > 0);
4365
4366 - release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
4367 + /*
4368 + * We grab the b_lock here first to serialise racing xfs_buf_rele()
4369 + * calls. The pag_buf_lock being taken on the last reference only
4370 + * serialises against racing lookups in xfs_buf_find(). IOWs, the second
4371 + * to last reference we drop here is not serialised against the last
4372 + * reference until we take bp->b_lock. Hence if we don't grab b_lock
4373 + * first, the last "release" reference can win the race to the lock and
4374 + * free the buffer before the second-to-last reference is processed,
4375 + * leading to a use-after-free scenario.
4376 + */
4377 spin_lock(&bp->b_lock);
4378 + release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
4379 if (!release) {
4380 /*
4381 * Drop the in-flight state if the buffer is already on the LRU
4382 diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
4383 index 3b77588a9360..dec03c0dbc21 100644
4384 --- a/include/linux/bitmap.h
4385 +++ b/include/linux/bitmap.h
4386 @@ -185,8 +185,13 @@ extern int bitmap_print_to_pagebuf(bool list, char *buf,
4387 #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
4388 #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
4389
4390 +/*
4391 + * The static inlines below do not handle constant nbits==0 correctly,
4392 + * so make such users (should any ever turn up) call the out-of-line
4393 + * versions.
4394 + */
4395 #define small_const_nbits(nbits) \
4396 - (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
4397 + (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)
4398
4399 static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
4400 {
4401 @@ -316,7 +321,7 @@ static __always_inline int bitmap_weight(const unsigned long *src, unsigned int
4402 }
4403
4404 static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
4405 - unsigned int shift, int nbits)
4406 + unsigned int shift, unsigned int nbits)
4407 {
4408 if (small_const_nbits(nbits))
4409 *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
4410 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
4411 index 0590e7d47b02..ab90a8541aaa 100644
4412 --- a/include/linux/kvm_host.h
4413 +++ b/include/linux/kvm_host.h
4414 @@ -843,6 +843,7 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
4415 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
4416
4417 bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
4418 +bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
4419
4420 struct kvm_irq_ack_notifier {
4421 struct hlist_node link;
4422 diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
4423 index 134a2f69c21a..9469eef30095 100644
4424 --- a/include/linux/memory_hotplug.h
4425 +++ b/include/linux/memory_hotplug.h
4426 @@ -272,6 +272,7 @@ static inline void remove_memory(int nid, u64 start, u64 size) {}
4427
4428 extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
4429 void *arg, int (*func)(struct memory_block *, void *));
4430 +extern int __add_memory(int nid, u64 start, u64 size);
4431 extern int add_memory(int nid, u64 start, u64 size);
4432 extern int add_memory_resource(int nid, struct resource *resource, bool online);
4433 extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
4434 diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
4435 index cf815577bd68..3ae1fe743bc3 100644
4436 --- a/include/linux/mfd/max8997.h
4437 +++ b/include/linux/mfd/max8997.h
4438 @@ -178,7 +178,6 @@ struct max8997_led_platform_data {
4439 struct max8997_platform_data {
4440 /* IRQ */
4441 int ono;
4442 - int wakeup;
4443
4444 /* ---- PMIC ---- */
4445 struct max8997_regulator_data *regulators;
4446 diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
4447 index 638222e43e48..93011c61aafd 100644
4448 --- a/include/linux/mfd/mc13xxx.h
4449 +++ b/include/linux/mfd/mc13xxx.h
4450 @@ -247,6 +247,7 @@ struct mc13xxx_platform_data {
4451 #define MC13XXX_ADC0_TSMOD0 (1 << 12)
4452 #define MC13XXX_ADC0_TSMOD1 (1 << 13)
4453 #define MC13XXX_ADC0_TSMOD2 (1 << 14)
4454 +#define MC13XXX_ADC0_CHRGRAWDIV (1 << 15)
4455 #define MC13XXX_ADC0_ADINC1 (1 << 16)
4456 #define MC13XXX_ADC0_ADINC2 (1 << 17)
4457
4458 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
4459 index c2aaf539728f..854e90be1a02 100644
4460 --- a/kernel/auditsc.c
4461 +++ b/kernel/auditsc.c
4462 @@ -1096,7 +1096,7 @@ static void audit_log_execve_info(struct audit_context *context,
4463 }
4464
4465 /* write as much as we can to the audit log */
4466 - if (len_buf > 0) {
4467 + if (len_buf >= 0) {
4468 /* NOTE: some magic numbers here - basically if we
4469 * can't fit a reasonable amount of data into the
4470 * existing audit buffer, flush it and start with
4471 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
4472 index a0339c458c14..c1873d325ebd 100644
4473 --- a/kernel/printk/printk.c
4474 +++ b/kernel/printk/printk.c
4475 @@ -1050,7 +1050,7 @@ void __init setup_log_buf(int early)
4476 {
4477 unsigned long flags;
4478 char *new_log_buf;
4479 - int free;
4480 + unsigned int free;
4481
4482 if (log_buf != __log_buf)
4483 return;
4484 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
4485 index d8afae1bd5c5..b765a58cf20f 100644
4486 --- a/kernel/sched/fair.c
4487 +++ b/kernel/sched/fair.c
4488 @@ -7950,13 +7950,22 @@ out_all_pinned:
4489 sd->nr_balance_failed = 0;
4490
4491 out_one_pinned:
4492 + ld_moved = 0;
4493 +
4494 + /*
4495 + * idle_balance() disregards balance intervals, so we could repeatedly
4496 + * reach this code, which would lead to balance_interval skyrocketing
4497 + * in a short amount of time. Skip the balance_interval increase logic
4498 + * to avoid that.
4499 + */
4500 + if (env.idle == CPU_NEWLY_IDLE)
4501 + goto out;
4502 +
4503 /* tune up the balancing interval */
4504 if (((env.flags & LBF_ALL_PINNED) &&
4505 sd->balance_interval < MAX_PINNED_INTERVAL) ||
4506 (sd->balance_interval < sd->max_interval))
4507 sd->balance_interval *= 2;
4508 -
4509 - ld_moved = 0;
4510 out:
4511 return ld_moved;
4512 }
4513 diff --git a/mm/ksm.c b/mm/ksm.c
4514 index 614b2cce9ad7..d6c81a5076a7 100644
4515 --- a/mm/ksm.c
4516 +++ b/mm/ksm.c
4517 @@ -710,13 +710,13 @@ static int remove_stable_node(struct stable_node *stable_node)
4518 return 0;
4519 }
4520
4521 - if (WARN_ON_ONCE(page_mapped(page))) {
4522 - /*
4523 - * This should not happen: but if it does, just refuse to let
4524 - * merge_across_nodes be switched - there is no need to panic.
4525 - */
4526 - err = -EBUSY;
4527 - } else {
4528 + /*
4529 + * Page could be still mapped if this races with __mmput() running in
4530 + * between ksm_exit() and exit_mmap(). Just refuse to let
4531 + * merge_across_nodes/max_page_sharing be switched.
4532 + */
4533 + err = -EBUSY;
4534 + if (!page_mapped(page)) {
4535 /*
4536 * The stable node did not yet appear stale to get_ksm_page(),
4537 * since that allows for an unmapped ksm page to be recognized
4538 diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
4539 index b4c8d7b9ab82..449999657c0b 100644
4540 --- a/mm/memory_hotplug.c
4541 +++ b/mm/memory_hotplug.c
4542 @@ -1340,7 +1340,12 @@ static int online_memory_block(struct memory_block *mem, void *arg)
4543 return memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
4544 }
4545
4546 -/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
4547 +/*
4548 + * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
4549 + * and online/offline operations (triggered e.g. by sysfs).
4550 + *
4551 + * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
4552 + */
4553 int __ref add_memory_resource(int nid, struct resource *res, bool online)
4554 {
4555 u64 start, size;
4556 @@ -1418,9 +1423,9 @@ out:
4557 mem_hotplug_done();
4558 return ret;
4559 }
4560 -EXPORT_SYMBOL_GPL(add_memory_resource);
4561
4562 -int __ref add_memory(int nid, u64 start, u64 size)
4563 +/* requires device_hotplug_lock, see add_memory_resource() */
4564 +int __ref __add_memory(int nid, u64 start, u64 size)
4565 {
4566 struct resource *res;
4567 int ret;
4568 @@ -1434,6 +1439,17 @@ int __ref add_memory(int nid, u64 start, u64 size)
4569 release_memory_resource(res);
4570 return ret;
4571 }
4572 +
4573 +int add_memory(int nid, u64 start, u64 size)
4574 +{
4575 + int rc;
4576 +
4577 + lock_device_hotplug();
4578 + rc = __add_memory(nid, start, size);
4579 + unlock_device_hotplug();
4580 +
4581 + return rc;
4582 +}
4583 EXPORT_SYMBOL_GPL(add_memory);
4584
4585 #ifdef CONFIG_MEMORY_HOTREMOVE
4586 diff --git a/mm/page-writeback.c b/mm/page-writeback.c
4587 index 281a46aeae61..f6a376a51099 100644
4588 --- a/mm/page-writeback.c
4589 +++ b/mm/page-writeback.c
4590 @@ -2141,6 +2141,13 @@ EXPORT_SYMBOL(tag_pages_for_writeback);
4591 * not miss some pages (e.g., because some other process has cleared TOWRITE
4592 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
4593 * by the process clearing the DIRTY tag (and submitting the page for IO).
4594 + *
4595 + * To avoid deadlocks between range_cyclic writeback and callers that hold
4596 + * pages in PageWriteback to aggregate IO until write_cache_pages() returns,
4597 + * we do not loop back to the start of the file. Doing so causes a page
4598 + * lock/page writeback access order inversion - we should only ever lock
4599 + * multiple pages in ascending page->index order, and looping back to the start
4600 + * of the file violates that rule and causes deadlocks.
4601 */
4602 int write_cache_pages(struct address_space *mapping,
4603 struct writeback_control *wbc, writepage_t writepage,
4604 @@ -2155,7 +2162,6 @@ int write_cache_pages(struct address_space *mapping,
4605 pgoff_t index;
4606 pgoff_t end; /* Inclusive */
4607 pgoff_t done_index;
4608 - int cycled;
4609 int range_whole = 0;
4610 int tag;
4611
4612 @@ -2163,23 +2169,17 @@ int write_cache_pages(struct address_space *mapping,
4613 if (wbc->range_cyclic) {
4614 writeback_index = mapping->writeback_index; /* prev offset */
4615 index = writeback_index;
4616 - if (index == 0)
4617 - cycled = 1;
4618 - else
4619 - cycled = 0;
4620 end = -1;
4621 } else {
4622 index = wbc->range_start >> PAGE_SHIFT;
4623 end = wbc->range_end >> PAGE_SHIFT;
4624 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
4625 range_whole = 1;
4626 - cycled = 1; /* ignore range_cyclic tests */
4627 }
4628 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
4629 tag = PAGECACHE_TAG_TOWRITE;
4630 else
4631 tag = PAGECACHE_TAG_DIRTY;
4632 -retry:
4633 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
4634 tag_pages_for_writeback(mapping, index, end);
4635 done_index = index;
4636 @@ -2287,17 +2287,14 @@ continue_unlock:
4637 pagevec_release(&pvec);
4638 cond_resched();
4639 }
4640 - if (!cycled && !done) {
4641 - /*
4642 - * range_cyclic:
4643 - * We hit the last page and there is more work to be done: wrap
4644 - * back to the start of the file
4645 - */
4646 - cycled = 1;
4647 - index = 0;
4648 - end = writeback_index - 1;
4649 - goto retry;
4650 - }
4651 +
4652 + /*
4653 + * If we hit the last page and there is more work to be done: wrap
4654 + * the index back to the start of the file for the next
4655 + * time we are called.
4656 + */
4657 + if (wbc->range_cyclic && !done)
4658 + done_index = 0;
4659 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
4660 mapping->writeback_index = done_index;
4661
4662 diff --git a/net/core/dev.c b/net/core/dev.c
4663 index 547b4daae5ca..c6fb7e61cb40 100644
4664 --- a/net/core/dev.c
4665 +++ b/net/core/dev.c
4666 @@ -2997,7 +2997,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de
4667 }
4668
4669 skb = next;
4670 - if (netif_xmit_stopped(txq) && skb) {
4671 + if (netif_tx_queue_stopped(txq) && skb) {
4672 rc = NETDEV_TX_BUSY;
4673 break;
4674 }
4675 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
4676 index ba724576764e..ead1a32c68f7 100644
4677 --- a/net/core/rtnetlink.c
4678 +++ b/net/core/rtnetlink.c
4679 @@ -1724,6 +1724,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
4680 if (tb[IFLA_VF_MAC]) {
4681 struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
4682
4683 + if (ivm->vf >= INT_MAX)
4684 + return -EINVAL;
4685 err = -EOPNOTSUPP;
4686 if (ops->ndo_set_vf_mac)
4687 err = ops->ndo_set_vf_mac(dev, ivm->vf,
4688 @@ -1735,6 +1737,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
4689 if (tb[IFLA_VF_VLAN]) {
4690 struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
4691
4692 + if (ivv->vf >= INT_MAX)
4693 + return -EINVAL;
4694 err = -EOPNOTSUPP;
4695 if (ops->ndo_set_vf_vlan)
4696 err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
4697 @@ -1767,6 +1771,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
4698 if (len == 0)
4699 return -EINVAL;
4700
4701 + if (ivvl[0]->vf >= INT_MAX)
4702 + return -EINVAL;
4703 err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
4704 ivvl[0]->qos, ivvl[0]->vlan_proto);
4705 if (err < 0)
4706 @@ -1777,6 +1783,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
4707 struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
4708 struct ifla_vf_info ivf;
4709
4710 + if (ivt->vf >= INT_MAX)
4711 + return -EINVAL;
4712 err = -EOPNOTSUPP;
4713 if (ops->ndo_get_vf_config)
4714 err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
4715 @@ -1795,6 +1803,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
4716 if (tb[IFLA_VF_RATE]) {
4717 struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
4718
4719 + if (ivt->vf >= INT_MAX)
4720 + return -EINVAL;
4721 err = -EOPNOTSUPP;
4722 if (ops->ndo_set_vf_rate)
4723 err = ops->ndo_set_vf_rate(dev, ivt->vf,
4724 @@ -1807,6 +1817,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
4725 if (tb[IFLA_VF_SPOOFCHK]) {
4726 struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
4727
4728 + if (ivs->vf >= INT_MAX)
4729 + return -EINVAL;
4730 err = -EOPNOTSUPP;
4731 if (ops->ndo_set_vf_spoofchk)
4732 err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
4733 @@ -1818,6 +1830,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
4734 if (tb[IFLA_VF_LINK_STATE]) {
4735 struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
4736
4737 + if (ivl->vf >= INT_MAX)
4738 + return -EINVAL;
4739 err = -EOPNOTSUPP;
4740 if (ops->ndo_set_vf_link_state)
4741 err = ops->ndo_set_vf_link_state(dev, ivl->vf,
4742 @@ -1831,6 +1845,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
4743
4744 err = -EOPNOTSUPP;
4745 ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
4746 + if (ivrssq_en->vf >= INT_MAX)
4747 + return -EINVAL;
4748 if (ops->ndo_set_vf_rss_query_en)
4749 err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
4750 ivrssq_en->setting);
4751 @@ -1841,6 +1857,8 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
4752 if (tb[IFLA_VF_TRUST]) {
4753 struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
4754
4755 + if (ivt->vf >= INT_MAX)
4756 + return -EINVAL;
4757 err = -EOPNOTSUPP;
4758 if (ops->ndo_set_vf_trust)
4759 err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
4760 @@ -1851,15 +1869,18 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
4761 if (tb[IFLA_VF_IB_NODE_GUID]) {
4762 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
4763
4764 + if (ivt->vf >= INT_MAX)
4765 + return -EINVAL;
4766 if (!ops->ndo_set_vf_guid)
4767 return -EOPNOTSUPP;
4768 -
4769 return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
4770 }
4771
4772 if (tb[IFLA_VF_IB_PORT_GUID]) {
4773 struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
4774
4775 + if (ivt->vf >= INT_MAX)
4776 + return -EINVAL;
4777 if (!ops->ndo_set_vf_guid)
4778 return -EOPNOTSUPP;
4779
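
Every hunk in this file adds the same guard: the VF index arrives from userspace as an unsigned 32-bit field, but the ndo_set_vf_*() callbacks take it as an int, so values of INT_MAX and above are rejected with -EINVAL before they could appear negative inside a driver. A standalone illustration of the wraparound being guarded against (fake_set_vf_mac is an invented stand-in, not a real netdev op):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for an ndo_set_vf_*() driver callback, which takes an int. */
static int fake_set_vf_mac(int vf)
{
    return vf < 0 ? -1 : 0;      /* a negative index would be nonsense */
}

int main(void)
{
    uint32_t vf = 0x80000001u;   /* userspace-supplied index above INT_MAX */

    if (vf >= INT_MAX) {         /* the added sanity check */
        puts("rejected with -EINVAL before reaching the driver");
        return 0;
    }
    return fake_set_vf_mac((int)vf);   /* would otherwise arrive as a negative int */
}
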
4780 diff --git a/net/core/sock.c b/net/core/sock.c
4781 index d22493351407..9178c1654375 100644
4782 --- a/net/core/sock.c
4783 +++ b/net/core/sock.c
4784 @@ -945,10 +945,12 @@ set_rcvbuf:
4785 clear_bit(SOCK_PASSSEC, &sock->flags);
4786 break;
4787 case SO_MARK:
4788 - if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
4789 + if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
4790 ret = -EPERM;
4791 - else
4792 + } else if (val != sk->sk_mark) {
4793 sk->sk_mark = val;
4794 + sk_dst_reset(sk);
4795 + }
4796 break;
4797
4798 case SO_RXQ_OVFL:
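
Since the socket mark can steer route selection, the rewritten SO_MARK branch drops the cached destination whenever the mark actually changes, forcing the next transmit to redo the route lookup, and it now skips both the store and the reset when the value is unchanged. A toy model of that mark-keyed cache invalidation (invented types, not net/core):

#include <stdio.h>

/* Toy socket with a mark-keyed "cached route" (-1 means no cached route). */
struct toy_sock {
    unsigned int mark;
    int          cached_route;
};

static void set_mark(struct toy_sock *sk, unsigned int val)
{
    if (val != sk->mark) {         /* only act on a real change */
        sk->mark = val;
        sk->cached_route = -1;     /* models sk_dst_reset(): force a new lookup */
    }
}

int main(void)
{
    struct toy_sock sk = { .mark = 0, .cached_route = 42 };

    set_mark(&sk, 0);              /* same mark: cache kept */
    printf("route after no-op change: %d\n", sk.cached_route);
    set_mark(&sk, 7);              /* new mark: cache dropped */
    printf("route after real change:  %d\n", sk.cached_route);
    return 0;
}
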
4799 diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
4800 index 03a696d3bcd9..4a88c4eb2301 100644
4801 --- a/net/l2tp/l2tp_ip.c
4802 +++ b/net/l2tp/l2tp_ip.c
4803 @@ -116,6 +116,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
4804 unsigned char *ptr, *optr;
4805 struct l2tp_session *session;
4806 struct l2tp_tunnel *tunnel = NULL;
4807 + struct iphdr *iph;
4808 int length;
4809
4810 if (!pskb_may_pull(skb, 4))
4811 @@ -174,24 +175,17 @@ pass_up:
4812 goto discard;
4813
4814 tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
4815 - tunnel = l2tp_tunnel_find(net, tunnel_id);
4816 - if (tunnel) {
4817 - sk = tunnel->sock;
4818 - sock_hold(sk);
4819 - } else {
4820 - struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
4821 -
4822 - read_lock_bh(&l2tp_ip_lock);
4823 - sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
4824 - inet_iif(skb), tunnel_id);
4825 - if (!sk) {
4826 - read_unlock_bh(&l2tp_ip_lock);
4827 - goto discard;
4828 - }
4829 + iph = (struct iphdr *)skb_network_header(skb);
4830
4831 - sock_hold(sk);
4832 + read_lock_bh(&l2tp_ip_lock);
4833 + sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
4834 + tunnel_id);
4835 + if (!sk) {
4836 read_unlock_bh(&l2tp_ip_lock);
4837 + goto discard;
4838 }
4839 + sock_hold(sk);
4840 + read_unlock_bh(&l2tp_ip_lock);
4841
4842 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
4843 goto discard_put;
4844 diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
4845 index 8d412b9b0214..423cb095ad37 100644
4846 --- a/net/l2tp/l2tp_ip6.c
4847 +++ b/net/l2tp/l2tp_ip6.c
4848 @@ -128,6 +128,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
4849 unsigned char *ptr, *optr;
4850 struct l2tp_session *session;
4851 struct l2tp_tunnel *tunnel = NULL;
4852 + struct ipv6hdr *iph;
4853 int length;
4854
4855 if (!pskb_may_pull(skb, 4))
4856 @@ -187,24 +188,17 @@ pass_up:
4857 goto discard;
4858
4859 tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
4860 - tunnel = l2tp_tunnel_find(net, tunnel_id);
4861 - if (tunnel) {
4862 - sk = tunnel->sock;
4863 - sock_hold(sk);
4864 - } else {
4865 - struct ipv6hdr *iph = ipv6_hdr(skb);
4866 -
4867 - read_lock_bh(&l2tp_ip6_lock);
4868 - sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
4869 - inet6_iif(skb), tunnel_id);
4870 - if (!sk) {
4871 - read_unlock_bh(&l2tp_ip6_lock);
4872 - goto discard;
4873 - }
4874 + iph = ipv6_hdr(skb);
4875
4876 - sock_hold(sk);
4877 + read_lock_bh(&l2tp_ip6_lock);
4878 + sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
4879 + inet6_iif(skb), tunnel_id);
4880 + if (!sk) {
4881 read_unlock_bh(&l2tp_ip6_lock);
4882 + goto discard;
4883 }
4884 + sock_hold(sk);
4885 + read_unlock_bh(&l2tp_ip6_lock);
4886
4887 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
4888 goto discard_put;
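
This IPv6 hunk and its IPv4 twin above make the same change: rather than trusting l2tp_tunnel_find() and holding whatever socket the tunnel points at, the receive path now resolves the socket solely through the protocol's own bind lookup under l2tp_ip_lock/l2tp_ip6_lock and discards the packet when nothing is bound. A toy sketch of the look-up-then-hold pattern (invented types, a pthread rwlock standing in for the kernel lock):

#include <pthread.h>
#include <stdio.h>

struct toy_sock { int refcnt; int bound_id; };

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct toy_sock bound = { .refcnt = 1, .bound_id = 7 };

static struct toy_sock *lookup_and_hold(int tunnel_id)
{
    struct toy_sock *sk = NULL;

    pthread_rwlock_rdlock(&table_lock);
    if (bound.bound_id == tunnel_id)    /* models __l2tp_ip_bind_lookup() */
        sk = &bound;
    if (sk)
        sk->refcnt++;                   /* models sock_hold() before unlocking */
    pthread_rwlock_unlock(&table_lock);
    return sk;                          /* NULL means: discard the packet */
}

int main(void)
{
    printf("match:    %p\n", (void *)lookup_and_hold(7));
    printf("no match: %p\n", (void *)lookup_and_hold(8));
    return 0;
}
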
4889 diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
4890 index cf9b2fe8eac6..9bebf4dc1c8e 100644
4891 --- a/net/sched/act_pedit.c
4892 +++ b/net/sched/act_pedit.c
4893 @@ -54,13 +54,14 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
4894 if (tb[TCA_PEDIT_PARMS] == NULL)
4895 return -EINVAL;
4896 parm = nla_data(tb[TCA_PEDIT_PARMS]);
4897 + if (!parm->nkeys)
4898 + return -EINVAL;
4899 +
4900 ksize = parm->nkeys * sizeof(struct tc_pedit_key);
4901 if (nla_len(tb[TCA_PEDIT_PARMS]) < sizeof(*parm) + ksize)
4902 return -EINVAL;
4903
4904 if (!tcf_hash_check(tn, parm->index, a, bind)) {
4905 - if (!parm->nkeys)
4906 - return -EINVAL;
4907 ret = tcf_hash_create(tn, parm->index, est, a,
4908 &act_pedit_ops, bind, false);
4909 if (ret)
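
Hoisting the !parm->nkeys test above the size calculation rejects a zero-key pedit action on every path, not only when a brand-new action is created, and it keeps ksize from evaluating to 0 and waving through a parameter block that carries no keys at all. A standalone sketch of that validate-before-sizing order (toy layout, not struct tc_pedit):

#include <stdio.h>
#include <string.h>

struct toy_parm { unsigned int nkeys; };
struct toy_key  { int off, val; };

static int validate(const void *blob, size_t blob_len)
{
    struct toy_parm parm;

    if (blob_len < sizeof(parm))
        return -1;
    memcpy(&parm, blob, sizeof(parm));

    if (!parm.nkeys)                          /* reject before sizing anything */
        return -1;

    if (blob_len < sizeof(parm) + parm.nkeys * sizeof(struct toy_key))
        return -1;                            /* the keys must actually be present */
    return 0;
}

int main(void)
{
    struct toy_parm empty = { .nkeys = 0 };

    printf("zero keys -> %d\n", validate(&empty, sizeof(empty)));   /* rejected */
    return 0;
}
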
4910 diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
4911 index 1d74d653e6c0..ad0dcb69395d 100644
4912 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c
4913 +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
4914 @@ -63,6 +63,7 @@
4915 #include <linux/sunrpc/gss_krb5.h>
4916 #include <linux/random.h>
4917 #include <linux/crypto.h>
4918 +#include <linux/atomic.h>
4919
4920 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
4921 # define RPCDBG_FACILITY RPCDBG_AUTH
4922 diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
4923 index 280fb3178708..f3f05148922a 100644
4924 --- a/net/sunrpc/xprtsock.c
4925 +++ b/net/sunrpc/xprtsock.c
4926 @@ -124,7 +124,7 @@ static struct ctl_table xs_tunables_table[] = {
4927 .mode = 0644,
4928 .proc_handler = proc_dointvec_minmax,
4929 .extra1 = &xprt_min_resvport_limit,
4930 - .extra2 = &xprt_max_resvport
4931 + .extra2 = &xprt_max_resvport_limit
4932 },
4933 {
4934 .procname = "max_resvport",
4935 @@ -132,7 +132,7 @@ static struct ctl_table xs_tunables_table[] = {
4936 .maxlen = sizeof(unsigned int),
4937 .mode = 0644,
4938 .proc_handler = proc_dointvec_minmax,
4939 - .extra1 = &xprt_min_resvport,
4940 + .extra1 = &xprt_min_resvport_limit,
4941 .extra2 = &xprt_max_resvport_limit
4942 },
4943 {
4944 @@ -1737,11 +1737,17 @@ static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
4945 xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
4946 }
4947
4948 -static unsigned short xs_get_random_port(void)
4949 +static int xs_get_random_port(void)
4950 {
4951 - unsigned short range = xprt_max_resvport - xprt_min_resvport + 1;
4952 - unsigned short rand = (unsigned short) prandom_u32() % range;
4953 - return rand + xprt_min_resvport;
4954 + unsigned short min = xprt_min_resvport, max = xprt_max_resvport;
4955 + unsigned short range;
4956 + unsigned short rand;
4957 +
4958 + if (max < min)
4959 + return -EADDRINUSE;
4960 + range = max - min + 1;
4961 + rand = (unsigned short) prandom_u32() % range;
4962 + return rand + min;
4963 }
4964
4965 /**
4966 @@ -1798,9 +1804,9 @@ static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
4967 transport->srcport = xs_sock_getport(sock);
4968 }
4969
4970 -static unsigned short xs_get_srcport(struct sock_xprt *transport)
4971 +static int xs_get_srcport(struct sock_xprt *transport)
4972 {
4973 - unsigned short port = transport->srcport;
4974 + int port = transport->srcport;
4975
4976 if (port == 0 && transport->xprt.resvport)
4977 port = xs_get_random_port();
4978 @@ -1821,7 +1827,7 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock)
4979 {
4980 struct sockaddr_storage myaddr;
4981 int err, nloop = 0;
4982 - unsigned short port = xs_get_srcport(transport);
4983 + int port = xs_get_srcport(transport);
4984 unsigned short last;
4985
4986 /*
4987 @@ -1839,8 +1845,8 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock)
4988 * transport->xprt.resvport == 1) xs_get_srcport above will
4989 * ensure that port is non-zero and we will bind as needed.
4990 */
4991 - if (port == 0)
4992 - return 0;
4993 + if (port <= 0)
4994 + return port;
4995
4996 memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
4997 do {
4998 @@ -3223,12 +3229,8 @@ static int param_set_uint_minmax(const char *val,
4999
5000 static int param_set_portnr(const char *val, const struct kernel_param *kp)
5001 {
5002 - if (kp->arg == &xprt_min_resvport)
5003 - return param_set_uint_minmax(val, kp,
5004 - RPC_MIN_RESVPORT,
5005 - xprt_max_resvport);
5006 return param_set_uint_minmax(val, kp,
5007 - xprt_min_resvport,
5008 + RPC_MIN_RESVPORT,
5009 RPC_MAX_RESVPORT);
5010 }
5011
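
The sysctl and module-parameter hunks clamp min_resvport and max_resvport against the absolute RPC limits instead of against each other, which makes an inverted range (min above max) possible to configure; xs_get_random_port() and xs_get_srcport() therefore return int so that case can surface as -EADDRINUSE instead of a port computed from a bogus range. A standalone sketch of the new picker (assumed default values, -98 standing in for -EADDRINUSE):

#include <stdio.h>
#include <stdlib.h>

#define EADDRINUSE_ERR (-98)

static unsigned short min_resvport = 665;    /* tunable, may now end up above max */
static unsigned short max_resvport = 1023;

static int get_random_port(void)
{
    unsigned short min = min_resvport, max = max_resvport;
    unsigned short range, pick;

    if (max < min)                           /* inverted range: report an error */
        return EADDRINUSE_ERR;
    range = max - min + 1;
    pick = (unsigned short)(rand() % range);
    return pick + min;
}

int main(void)
{
    printf("port: %d\n", get_random_port());
    min_resvport = 2000;                     /* now min > max */
    printf("port: %d\n", get_random_port()); /* -98 instead of a garbage port */
    return 0;
}
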
5012 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
5013 index cecf51a5aec4..32ae82a5596d 100644
5014 --- a/net/unix/af_unix.c
5015 +++ b/net/unix/af_unix.c
5016 @@ -224,6 +224,8 @@ static inline void unix_release_addr(struct unix_address *addr)
5017
5018 static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
5019 {
5020 + *hashp = 0;
5021 +
5022 if (len <= sizeof(short) || len > sizeof(*sunaddr))
5023 return -EINVAL;
5024 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
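
unix_mkname() only computes a hash for some address forms, so callers passing an on-stack hash could read it uninitialized on the paths that never touch it; zero-initializing *hashp up front gives every exit a defined value. A toy version of that out-parameter discipline (invented names and hash, not af_unix code):

#include <stdio.h>

static int parse_addr(const char *path, unsigned int *hashp)
{
    *hashp = 0;                        /* defined even if we bail out below */

    if (!path || !path[0])
        return -1;                     /* early exit: no hash is ever computed */

    for (const char *p = path; *p; p++)
        *hashp = *hashp * 31 + (unsigned char)*p;   /* stand-in hash */
    return 0;
}

int main(void)
{
    unsigned int hash;                 /* deliberately not initialized here */

    parse_addr("", &hash);
    printf("hash after early return: %u\n", hash);  /* 0, not stack garbage */
    return 0;
}
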
5025 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
5026 index 48d6dca471c6..6c8daf5b391f 100644
5027 --- a/sound/firewire/isight.c
5028 +++ b/sound/firewire/isight.c
5029 @@ -639,7 +639,7 @@ static int isight_probe(struct fw_unit *unit,
5030 if (!isight->audio_base) {
5031 dev_err(&unit->device, "audio unit base not found\n");
5032 err = -ENXIO;
5033 - goto err_unit;
5034 + goto error;
5035 }
5036 fw_iso_resources_init(&isight->resources, unit);
5037
5038 @@ -668,12 +668,12 @@ static int isight_probe(struct fw_unit *unit,
5039 dev_set_drvdata(&unit->device, isight);
5040
5041 return 0;
5042 -
5043 -err_unit:
5044 - fw_unit_put(isight->unit);
5045 - mutex_destroy(&isight->mutex);
5046 error:
5047 snd_card_free(card);
5048 +
5049 + mutex_destroy(&isight->mutex);
5050 + fw_unit_put(isight->unit);
5051 +
5052 return err;
5053 }
5054
5055 diff --git a/sound/i2c/cs8427.c b/sound/i2c/cs8427.c
5056 index 7e21621e492a..7fd1b4000883 100644
5057 --- a/sound/i2c/cs8427.c
5058 +++ b/sound/i2c/cs8427.c
5059 @@ -118,7 +118,7 @@ static int snd_cs8427_send_corudata(struct snd_i2c_device *device,
5060 struct cs8427 *chip = device->private_data;
5061 char *hw_data = udata ?
5062 chip->playback.hw_udata : chip->playback.hw_status;
5063 - char data[32];
5064 + unsigned char data[32];
5065 int err, idx;
5066
5067 if (!memcmp(hw_data, ndata, count))
5068 diff --git a/sound/soc/tegra/tegra_sgtl5000.c b/sound/soc/tegra/tegra_sgtl5000.c
5069 index 1e76869dd488..863e04809a6b 100644
5070 --- a/sound/soc/tegra/tegra_sgtl5000.c
5071 +++ b/sound/soc/tegra/tegra_sgtl5000.c
5072 @@ -152,14 +152,14 @@ static int tegra_sgtl5000_driver_probe(struct platform_device *pdev)
5073 dev_err(&pdev->dev,
5074 "Property 'nvidia,i2s-controller' missing/invalid\n");
5075 ret = -EINVAL;
5076 - goto err;
5077 + goto err_put_codec_of_node;
5078 }
5079
5080 tegra_sgtl5000_dai.platform_of_node = tegra_sgtl5000_dai.cpu_of_node;
5081
5082 ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
5083 if (ret)
5084 - goto err;
5085 + goto err_put_cpu_of_node;
5086
5087 ret = snd_soc_register_card(card);
5088 if (ret) {
5089 @@ -172,6 +172,13 @@ static int tegra_sgtl5000_driver_probe(struct platform_device *pdev)
5090
5091 err_fini_utils:
5092 tegra_asoc_utils_fini(&machine->util_data);
5093 +err_put_cpu_of_node:
5094 + of_node_put(tegra_sgtl5000_dai.cpu_of_node);
5095 + tegra_sgtl5000_dai.cpu_of_node = NULL;
5096 + tegra_sgtl5000_dai.platform_of_node = NULL;
5097 +err_put_codec_of_node:
5098 + of_node_put(tegra_sgtl5000_dai.codec_of_node);
5099 + tegra_sgtl5000_dai.codec_of_node = NULL;
5100 err:
5101 return ret;
5102 }
5103 @@ -186,6 +193,12 @@ static int tegra_sgtl5000_driver_remove(struct platform_device *pdev)
5104
5105 tegra_asoc_utils_fini(&machine->util_data);
5106
5107 + of_node_put(tegra_sgtl5000_dai.cpu_of_node);
5108 + tegra_sgtl5000_dai.cpu_of_node = NULL;
5109 + tegra_sgtl5000_dai.platform_of_node = NULL;
5110 + of_node_put(tegra_sgtl5000_dai.codec_of_node);
5111 + tegra_sgtl5000_dai.codec_of_node = NULL;
5112 +
5113 return ret;
5114 }
5115
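
Nodes returned by of_parse_phandle() carry an extra reference, so every probe path that bails out after grabbing codec_of_node or cpu_of_node has to drop it; the new labels, and the matching block in the remove path, release the nodes and clear the stale pointers in reverse order of acquisition. A toy goto-unwind sketch of that pattern (invented node_get/node_put, not the OF API):

#include <stdio.h>
#include <stdlib.h>

struct toy_node { int refcnt; };

static struct toy_node *node_get(void)            /* models of_parse_phandle() */
{
    struct toy_node *n = calloc(1, sizeof(*n));
    if (n)
        n->refcnt = 1;
    return n;
}

static void node_put(struct toy_node *n)          /* models of_node_put() */
{
    if (n && --n->refcnt == 0)
        free(n);
}

static int probe(int fail_at)
{
    struct toy_node *codec = NULL, *cpu = NULL;
    int ret = -1;

    codec = node_get();
    if (!codec || fail_at == 1)
        goto err_put_codec;
    cpu = node_get();
    if (!cpu || fail_at == 2)
        goto err_put_cpu;
    return 0;                         /* success: keep both refs for remove() to drop */

err_put_cpu:
    node_put(cpu);                    /* release in reverse order of acquisition */
err_put_codec:
    node_put(codec);
    return ret;
}

int main(void)
{
    printf("fail after codec: %d\n", probe(1));
    printf("fail after cpu:   %d\n", probe(2));
    return 0;
}
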
5116 diff --git a/tools/gpio/Build b/tools/gpio/Build
5117 index 620c1937d957..4141f35837db 100644
5118 --- a/tools/gpio/Build
5119 +++ b/tools/gpio/Build
5120 @@ -1,3 +1,4 @@
5121 +gpio-utils-y += gpio-utils.o
5122 lsgpio-y += lsgpio.o gpio-utils.o
5123 gpio-hammer-y += gpio-hammer.o gpio-utils.o
5124 gpio-event-mon-y += gpio-event-mon.o gpio-utils.o
5125 diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile
5126 index 250a891e6ef0..359dd5d11c81 100644
5127 --- a/tools/gpio/Makefile
5128 +++ b/tools/gpio/Makefile
5129 @@ -32,11 +32,15 @@ $(OUTPUT)include/linux/gpio.h: ../../include/uapi/linux/gpio.h
5130
5131 prepare: $(OUTPUT)include/linux/gpio.h
5132
5133 +GPIO_UTILS_IN := $(OUTPUT)gpio-utils-in.o
5134 +$(GPIO_UTILS_IN): prepare FORCE
5135 + $(Q)$(MAKE) $(build)=gpio-utils
5136 +
5137 #
5138 # lsgpio
5139 #
5140 LSGPIO_IN := $(OUTPUT)lsgpio-in.o
5141 -$(LSGPIO_IN): prepare FORCE
5142 +$(LSGPIO_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
5143 $(Q)$(MAKE) $(build)=lsgpio
5144 $(OUTPUT)lsgpio: $(LSGPIO_IN)
5145 $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
5146 @@ -45,7 +49,7 @@ $(OUTPUT)lsgpio: $(LSGPIO_IN)
5147 # gpio-hammer
5148 #
5149 GPIO_HAMMER_IN := $(OUTPUT)gpio-hammer-in.o
5150 -$(GPIO_HAMMER_IN): prepare FORCE
5151 +$(GPIO_HAMMER_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
5152 $(Q)$(MAKE) $(build)=gpio-hammer
5153 $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN)
5154 $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
5155 @@ -54,7 +58,7 @@ $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN)
5156 # gpio-event-mon
5157 #
5158 GPIO_EVENT_MON_IN := $(OUTPUT)gpio-event-mon-in.o
5159 -$(GPIO_EVENT_MON_IN): prepare FORCE
5160 +$(GPIO_EVENT_MON_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
5161 $(Q)$(MAKE) $(build)=gpio-event-mon
5162 $(OUTPUT)gpio-event-mon: $(GPIO_EVENT_MON_IN)
5163 $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
5164 diff --git a/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
5165 index a3d2c62fd805..0a3ad5dd1e8b 100644
5166 --- a/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
5167 +++ b/tools/objtool/arch/x86/tools/gen-insn-attr-x86.awk
5168 @@ -68,7 +68,7 @@ BEGIN {
5169
5170 lprefix1_expr = "\\((66|!F3)\\)"
5171 lprefix2_expr = "\\(F3\\)"
5172 - lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
5173 + lprefix3_expr = "\\((F2|!F3|66&F2)\\)"
5174 lprefix_expr = "\\((66|F2|F3)\\)"
5175 max_lprefix = 4
5176
5177 @@ -256,7 +256,7 @@ function convert_operands(count,opnd, i,j,imm,mod)
5178 return add_flags(imm, mod)
5179 }
5180
5181 -/^[0-9a-f]+\:/ {
5182 +/^[0-9a-f]+:/ {
5183 if (NR == 1)
5184 next
5185 # get index
5186 diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
5187 index 7ff46be908f0..d426fec3b1d3 100644
5188 --- a/tools/power/acpi/tools/acpidump/apmain.c
5189 +++ b/tools/power/acpi/tools/acpidump/apmain.c
5190 @@ -139,7 +139,7 @@ static int ap_insert_action(char *argument, u32 to_be_done)
5191
5192 current_action++;
5193 if (current_action > AP_MAX_ACTIONS) {
5194 - fprintf(stderr, "Too many table options (max %u)\n",
5195 + fprintf(stderr, "Too many table options (max %d)\n",
5196 AP_MAX_ACTIONS);
5197 return (-1);
5198 }
5199 diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
5200 index 231bcd2c4eb5..1e7ac6f3362f 100644
5201 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
5202 +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
5203 @@ -71,8 +71,11 @@ test_badarg "\$stackp" "\$stack0+10" "\$stack1-10"
5204 echo "r ${PROBEFUNC} \$retval" > kprobe_events
5205 ! echo "p ${PROBEFUNC} \$retval" > kprobe_events
5206
5207 +# $comm was introduced in 4.8, older kernels reject it.
5208 +if grep -A1 "fetcharg:" README | grep -q '\$comm' ; then
5209 : "Comm access"
5210 test_goodarg "\$comm"
5211 +fi
5212
5213 : "Indirect memory access"
5214 test_goodarg "+0(${GOODREG})" "-0(${GOODREG})" "+10(\$stack)" \
5215 diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c
5216 index 6ff7b601f854..4bb905925b0e 100644
5217 --- a/tools/usb/usbip/libsrc/usbip_host_common.c
5218 +++ b/tools/usb/usbip/libsrc/usbip_host_common.c
5219 @@ -43,7 +43,7 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
5220 int size;
5221 int fd;
5222 int length;
5223 - char status;
5224 + char status[2] = { 0 };
5225 int value = 0;
5226
5227 size = snprintf(status_attr_path, sizeof(status_attr_path),
5228 @@ -61,15 +61,15 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
5229 return -1;
5230 }
5231
5232 - length = read(fd, &status, 1);
5233 + length = read(fd, status, 1);
5234 if (length < 0) {
5235 err("error reading attribute %s", status_attr_path);
5236 close(fd);
5237 return -1;
5238 }
5239
5240 - value = atoi(&status);
5241 -
5242 + value = atoi(status);
5243 + close(fd);
5244 return value;
5245 }
5246
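
Two fixes share this hunk: atoi() requires a NUL-terminated string, which a lone char filled by read() never is, and the file descriptor was leaked on the success path. A minimal userspace illustration of the terminator half (self-contained, no sysfs access):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    /* Simulate read(fd, buf, 1) pulling a single digit out of a sysfs file. */
    char raw = '3';

    /* The buggy shape passed &raw, a single byte with no terminator, so atoi()
     * would read past it (undefined behaviour). The fixed shape below always
     * has a trailing NUL. */
    char status[2] = { 0 };
    memcpy(status, &raw, 1);

    printf("parsed status: %d\n", atoi(status));   /* safe: "3\0" */
    return 0;
}
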
5247 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
5248 index 0fc93519e63e..c0dff5337a50 100644
5249 --- a/virt/kvm/kvm_main.c
5250 +++ b/virt/kvm/kvm_main.c
5251 @@ -131,10 +131,30 @@ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
5252 {
5253 }
5254
5255 +bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
5256 +{
5257 + /*
5258 + * The metadata used by is_zone_device_page() to determine whether or
5259 + * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
5260 + * the device has been pinned, e.g. by get_user_pages(). WARN if the
5261 + * page_count() is zero to help detect bad usage of this helper.
5262 + */
5263 + if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
5264 + return false;
5265 +
5266 + return is_zone_device_page(pfn_to_page(pfn));
5267 +}
5268 +
5269 bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
5270 {
5271 + /*
5272 + * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
5273 + * perspective they are "normal" pages, albeit with slightly different
5274 + * usage rules.
5275 + */
5276 if (pfn_valid(pfn))
5277 - return PageReserved(pfn_to_page(pfn));
5278 + return PageReserved(pfn_to_page(pfn)) &&
5279 + !kvm_is_zone_device_pfn(pfn);
5280
5281 return true;
5282 }
5283 @@ -1758,7 +1778,7 @@ static void kvm_release_pfn_dirty(kvm_pfn_t pfn)
5284
5285 void kvm_set_pfn_dirty(kvm_pfn_t pfn)
5286 {
5287 - if (!kvm_is_reserved_pfn(pfn)) {
5288 + if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) {
5289 struct page *page = pfn_to_page(pfn);
5290
5291 if (!PageReserved(page))
5292 @@ -1769,7 +1789,7 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
5293
5294 void kvm_set_pfn_accessed(kvm_pfn_t pfn)
5295 {
5296 - if (!kvm_is_reserved_pfn(pfn))
5297 + if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
5298 mark_page_accessed(pfn_to_page(pfn));
5299 }
5300 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
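
As I read this last section, ZONE_DEVICE pages set PG_reserved yet are refcounted like ordinary memory, so treating them as reserved made KVM skip the reference accounting that keeps the backing device memory pinned; the new kvm_is_zone_device_pfn() helper carves them out of kvm_is_reserved_pfn(), while kvm_set_pfn_dirty() and kvm_set_pfn_accessed() still avoid LRU-style dirty and accessed updates on them. A toy classification mirroring that shape (invented flags, not struct page):

#include <stdbool.h>
#include <stdio.h>

struct toy_page { bool reserved; bool zone_device; int refcount; };

static bool is_zone_device(const struct toy_page *p)
{
    return p->refcount > 0 && p->zone_device;   /* only meaningful while pinned */
}

static bool is_reserved(const struct toy_page *p)
{
    return p->reserved && !is_zone_device(p);   /* device pages count as normal RAM */
}

static void set_dirty(const struct toy_page *p, bool *marked)
{
    *marked = !is_reserved(p) && !is_zone_device(p);   /* still skip device pages */
}

int main(void)
{
    struct toy_page dev = { .reserved = true, .zone_device = true, .refcount = 1 };
    bool marked;

    set_dirty(&dev, &marked);
    printf("reserved=%d dirty-marked=%d\n", is_reserved(&dev), marked);  /* 0 0 */
    return 0;
}
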