Magellan Linux

Contents of /trunk/kernel-alx/patches-4.4/0127-4.4.28-all-fixes.patch

Parent Directory | Revision Log


Revision 2863 - (show annotations) (download)
Mon Mar 27 13:49:08 2017 UTC (7 years, 1 month ago) by niro
File size: 161566 byte(s)
linux-4.4.28
1 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
2 index 0e4102ae1a61..c360f80c3473 100644
3 --- a/Documentation/kernel-parameters.txt
4 +++ b/Documentation/kernel-parameters.txt
5 @@ -1371,7 +1371,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
6 i8042.nopnp [HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX
7 controllers
8 i8042.notimeout [HW] Ignore timeout condition signalled by controller
9 - i8042.reset [HW] Reset the controller during init and cleanup
10 + i8042.reset [HW] Reset the controller during init, cleanup and
11 + suspend-to-ram transitions, only during s2r
12 + transitions, or never reset
13 + Format: { 1 | Y | y | 0 | N | n }
14 + 1, Y, y: always reset controller
15 + 0, N, n: don't ever reset controller
16 + Default: only on s2r transitions on x86; most other
17 + architectures force reset to be always executed
18 i8042.unlock [HW] Unlock (ignore) the keylock
19 i8042.kbdreset [HW] Reset device connected to KBD port
20
21 diff --git a/Makefile b/Makefile
22 index b6ee4ce561f8..391294301aaf 100644
23 --- a/Makefile
24 +++ b/Makefile
25 @@ -1,6 +1,6 @@
26 VERSION = 4
27 PATCHLEVEL = 4
28 -SUBLEVEL = 27
29 +SUBLEVEL = 28
30 EXTRAVERSION =
31 NAME = Blurry Fish Butt
32
33 diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
34 index 004b7f0bc76c..257b8699efde 100644
35 --- a/arch/arc/kernel/signal.c
36 +++ b/arch/arc/kernel/signal.c
37 @@ -107,13 +107,13 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
38 struct user_regs_struct uregs;
39
40 err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
41 - if (!err)
42 - set_current_blocked(&set);
43 -
44 err |= __copy_from_user(&uregs.scratch,
45 &(sf->uc.uc_mcontext.regs.scratch),
46 sizeof(sf->uc.uc_mcontext.regs.scratch));
47 + if (err)
48 + return err;
49
50 + set_current_blocked(&set);
51 regs->bta = uregs.scratch.bta;
52 regs->lp_start = uregs.scratch.lp_start;
53 regs->lp_end = uregs.scratch.lp_end;
54 @@ -138,7 +138,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
55 regs->r0 = uregs.scratch.r0;
56 regs->sp = uregs.scratch.sp;
57
58 - return err;
59 + return 0;
60 }
61
62 static inline int is_do_ss_needed(unsigned int magic)
63 diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
64 index 0a456bef8c79..8a336852eeba 100644
65 --- a/arch/arm64/include/asm/percpu.h
66 +++ b/arch/arm64/include/asm/percpu.h
67 @@ -44,48 +44,44 @@ static inline unsigned long __percpu_##op(void *ptr, \
68 \
69 switch (size) { \
70 case 1: \
71 - do { \
72 - asm ("//__per_cpu_" #op "_1\n" \
73 - "ldxrb %w[ret], %[ptr]\n" \
74 + asm ("//__per_cpu_" #op "_1\n" \
75 + "1: ldxrb %w[ret], %[ptr]\n" \
76 #asm_op " %w[ret], %w[ret], %w[val]\n" \
77 - "stxrb %w[loop], %w[ret], %[ptr]\n" \
78 - : [loop] "=&r" (loop), [ret] "=&r" (ret), \
79 - [ptr] "+Q"(*(u8 *)ptr) \
80 - : [val] "Ir" (val)); \
81 - } while (loop); \
82 + " stxrb %w[loop], %w[ret], %[ptr]\n" \
83 + " cbnz %w[loop], 1b" \
84 + : [loop] "=&r" (loop), [ret] "=&r" (ret), \
85 + [ptr] "+Q"(*(u8 *)ptr) \
86 + : [val] "Ir" (val)); \
87 break; \
88 case 2: \
89 - do { \
90 - asm ("//__per_cpu_" #op "_2\n" \
91 - "ldxrh %w[ret], %[ptr]\n" \
92 + asm ("//__per_cpu_" #op "_2\n" \
93 + "1: ldxrh %w[ret], %[ptr]\n" \
94 #asm_op " %w[ret], %w[ret], %w[val]\n" \
95 - "stxrh %w[loop], %w[ret], %[ptr]\n" \
96 - : [loop] "=&r" (loop), [ret] "=&r" (ret), \
97 - [ptr] "+Q"(*(u16 *)ptr) \
98 - : [val] "Ir" (val)); \
99 - } while (loop); \
100 + " stxrh %w[loop], %w[ret], %[ptr]\n" \
101 + " cbnz %w[loop], 1b" \
102 + : [loop] "=&r" (loop), [ret] "=&r" (ret), \
103 + [ptr] "+Q"(*(u16 *)ptr) \
104 + : [val] "Ir" (val)); \
105 break; \
106 case 4: \
107 - do { \
108 - asm ("//__per_cpu_" #op "_4\n" \
109 - "ldxr %w[ret], %[ptr]\n" \
110 + asm ("//__per_cpu_" #op "_4\n" \
111 + "1: ldxr %w[ret], %[ptr]\n" \
112 #asm_op " %w[ret], %w[ret], %w[val]\n" \
113 - "stxr %w[loop], %w[ret], %[ptr]\n" \
114 - : [loop] "=&r" (loop), [ret] "=&r" (ret), \
115 - [ptr] "+Q"(*(u32 *)ptr) \
116 - : [val] "Ir" (val)); \
117 - } while (loop); \
118 + " stxr %w[loop], %w[ret], %[ptr]\n" \
119 + " cbnz %w[loop], 1b" \
120 + : [loop] "=&r" (loop), [ret] "=&r" (ret), \
121 + [ptr] "+Q"(*(u32 *)ptr) \
122 + : [val] "Ir" (val)); \
123 break; \
124 case 8: \
125 - do { \
126 - asm ("//__per_cpu_" #op "_8\n" \
127 - "ldxr %[ret], %[ptr]\n" \
128 + asm ("//__per_cpu_" #op "_8\n" \
129 + "1: ldxr %[ret], %[ptr]\n" \
130 #asm_op " %[ret], %[ret], %[val]\n" \
131 - "stxr %w[loop], %[ret], %[ptr]\n" \
132 - : [loop] "=&r" (loop), [ret] "=&r" (ret), \
133 - [ptr] "+Q"(*(u64 *)ptr) \
134 - : [val] "Ir" (val)); \
135 - } while (loop); \
136 + " stxr %w[loop], %[ret], %[ptr]\n" \
137 + " cbnz %w[loop], 1b" \
138 + : [loop] "=&r" (loop), [ret] "=&r" (ret), \
139 + [ptr] "+Q"(*(u64 *)ptr) \
140 + : [val] "Ir" (val)); \
141 break; \
142 default: \
143 BUILD_BUG(); \
144 @@ -150,44 +146,40 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
145
146 switch (size) {
147 case 1:
148 - do {
149 - asm ("//__percpu_xchg_1\n"
150 - "ldxrb %w[ret], %[ptr]\n"
151 - "stxrb %w[loop], %w[val], %[ptr]\n"
152 - : [loop] "=&r"(loop), [ret] "=&r"(ret),
153 - [ptr] "+Q"(*(u8 *)ptr)
154 - : [val] "r" (val));
155 - } while (loop);
156 + asm ("//__percpu_xchg_1\n"
157 + "1: ldxrb %w[ret], %[ptr]\n"
158 + " stxrb %w[loop], %w[val], %[ptr]\n"
159 + " cbnz %w[loop], 1b"
160 + : [loop] "=&r"(loop), [ret] "=&r"(ret),
161 + [ptr] "+Q"(*(u8 *)ptr)
162 + : [val] "r" (val));
163 break;
164 case 2:
165 - do {
166 - asm ("//__percpu_xchg_2\n"
167 - "ldxrh %w[ret], %[ptr]\n"
168 - "stxrh %w[loop], %w[val], %[ptr]\n"
169 - : [loop] "=&r"(loop), [ret] "=&r"(ret),
170 - [ptr] "+Q"(*(u16 *)ptr)
171 - : [val] "r" (val));
172 - } while (loop);
173 + asm ("//__percpu_xchg_2\n"
174 + "1: ldxrh %w[ret], %[ptr]\n"
175 + " stxrh %w[loop], %w[val], %[ptr]\n"
176 + " cbnz %w[loop], 1b"
177 + : [loop] "=&r"(loop), [ret] "=&r"(ret),
178 + [ptr] "+Q"(*(u16 *)ptr)
179 + : [val] "r" (val));
180 break;
181 case 4:
182 - do {
183 - asm ("//__percpu_xchg_4\n"
184 - "ldxr %w[ret], %[ptr]\n"
185 - "stxr %w[loop], %w[val], %[ptr]\n"
186 - : [loop] "=&r"(loop), [ret] "=&r"(ret),
187 - [ptr] "+Q"(*(u32 *)ptr)
188 - : [val] "r" (val));
189 - } while (loop);
190 + asm ("//__percpu_xchg_4\n"
191 + "1: ldxr %w[ret], %[ptr]\n"
192 + " stxr %w[loop], %w[val], %[ptr]\n"
193 + " cbnz %w[loop], 1b"
194 + : [loop] "=&r"(loop), [ret] "=&r"(ret),
195 + [ptr] "+Q"(*(u32 *)ptr)
196 + : [val] "r" (val));
197 break;
198 case 8:
199 - do {
200 - asm ("//__percpu_xchg_8\n"
201 - "ldxr %[ret], %[ptr]\n"
202 - "stxr %w[loop], %[val], %[ptr]\n"
203 - : [loop] "=&r"(loop), [ret] "=&r"(ret),
204 - [ptr] "+Q"(*(u64 *)ptr)
205 - : [val] "r" (val));
206 - } while (loop);
207 + asm ("//__percpu_xchg_8\n"
208 + "1: ldxr %[ret], %[ptr]\n"
209 + " stxr %w[loop], %[val], %[ptr]\n"
210 + " cbnz %w[loop], 1b"
211 + : [loop] "=&r"(loop), [ret] "=&r"(ret),
212 + [ptr] "+Q"(*(u64 *)ptr)
213 + : [val] "r" (val));
214 break;
215 default:
216 BUILD_BUG();
217 diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
218 index b685257926f0..20ceb5edf7b8 100644
219 --- a/arch/arm64/kernel/head.S
220 +++ b/arch/arm64/kernel/head.S
221 @@ -518,8 +518,9 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
222 b.lt 4f // Skip if no PMU present
223 mrs x0, pmcr_el0 // Disable debug access traps
224 ubfx x0, x0, #11, #5 // to EL2 and allow access to
225 - msr mdcr_el2, x0 // all PMU counters from EL1
226 4:
227 + csel x0, xzr, x0, lt // all PMU counters from EL1
228 + msr mdcr_el2, x0 // (if they exist)
229
230 /* Stage-2 translation */
231 msr vttbr_el2, xzr
232 diff --git a/arch/metag/include/asm/atomic.h b/arch/metag/include/asm/atomic.h
233 index 470e365f04ea..8ff0a70865f6 100644
234 --- a/arch/metag/include/asm/atomic.h
235 +++ b/arch/metag/include/asm/atomic.h
236 @@ -39,11 +39,10 @@
237 #define atomic_dec(v) atomic_sub(1, (v))
238
239 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
240 +#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
241
242 #endif
243
244 -#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
245 -
246 #include <asm-generic/atomic64.h>
247
248 #endif /* __ASM_METAG_ATOMIC_H */
249 diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
250 index f6fc6aac5496..b6578611dddb 100644
251 --- a/arch/mips/include/asm/ptrace.h
252 +++ b/arch/mips/include/asm/ptrace.h
253 @@ -152,7 +152,7 @@ static inline int is_syscall_success(struct pt_regs *regs)
254
255 static inline long regs_return_value(struct pt_regs *regs)
256 {
257 - if (is_syscall_success(regs))
258 + if (is_syscall_success(regs) || !user_mode(regs))
259 return regs->regs[2];
260 else
261 return -regs->regs[2];
262 diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
263 index 090393aa0f20..6c7d78546eee 100644
264 --- a/arch/mips/vdso/Makefile
265 +++ b/arch/mips/vdso/Makefile
266 @@ -75,7 +75,7 @@ obj-vdso := $(obj-vdso-y:%.o=$(obj)/%.o)
267 $(obj-vdso): KBUILD_CFLAGS := $(cflags-vdso) $(native-abi)
268 $(obj-vdso): KBUILD_AFLAGS := $(aflags-vdso) $(native-abi)
269
270 -$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(native-abi)
271 +$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) $(native-abi)
272
273 $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
274 $(call if_changed,vdsold)
275 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
276 index 291cee28ccb6..c2c43f714684 100644
277 --- a/arch/parisc/include/asm/pgtable.h
278 +++ b/arch/parisc/include/asm/pgtable.h
279 @@ -83,10 +83,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
280 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
281
282 /* This is the size of the initially mapped kernel memory */
283 -#ifdef CONFIG_64BIT
284 -#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
285 +#if defined(CONFIG_64BIT)
286 +#define KERNEL_INITIAL_ORDER 26 /* 1<<26 = 64MB */
287 #else
288 -#define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */
289 +#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
290 #endif
291 #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
292
293 diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
294 index f7ea626e29c9..81d6f6391944 100644
295 --- a/arch/parisc/kernel/setup.c
296 +++ b/arch/parisc/kernel/setup.c
297 @@ -38,6 +38,7 @@
298 #include <linux/export.h>
299
300 #include <asm/processor.h>
301 +#include <asm/sections.h>
302 #include <asm/pdc.h>
303 #include <asm/led.h>
304 #include <asm/machdep.h> /* for pa7300lc_init() proto */
305 @@ -140,6 +141,13 @@ void __init setup_arch(char **cmdline_p)
306 #endif
307 printk(KERN_CONT ".\n");
308
309 + /*
310 + * Check if initial kernel page mappings are sufficient.
311 + * panic early if not, else we may access kernel functions
312 + * and variables which can't be reached.
313 + */
314 + if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE)
315 + panic("KERNEL_INITIAL_ORDER too small!");
316
317 pdc_console_init();
318
319 diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
320 index 308f29081d46..60771df10fde 100644
321 --- a/arch/parisc/kernel/vmlinux.lds.S
322 +++ b/arch/parisc/kernel/vmlinux.lds.S
323 @@ -88,8 +88,9 @@ SECTIONS
324 /* Start of data section */
325 _sdata = .;
326
327 - RO_DATA_SECTION(8)
328 -
329 + /* Architecturally we need to keep __gp below 0x1000000 and thus
330 + * in front of RO_DATA_SECTION() which stores lots of tracepoint
331 + * and ftrace symbols. */
332 #ifdef CONFIG_64BIT
333 . = ALIGN(16);
334 /* Linkage tables */
335 @@ -104,6 +105,8 @@ SECTIONS
336 }
337 #endif
338
339 + RO_DATA_SECTION(8)
340 +
341 /* unwind info */
342 .PARISC.unwind : {
343 __start___unwind = .;
344 diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S
345 index 2f01c4a0d8a0..7612eeb31da1 100644
346 --- a/arch/powerpc/kernel/vdso64/datapage.S
347 +++ b/arch/powerpc/kernel/vdso64/datapage.S
348 @@ -59,7 +59,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
349 bl V_LOCAL_FUNC(__get_datapage)
350 mtlr r12
351 addi r3,r3,CFG_SYSCALL_MAP64
352 - cmpli cr0,r4,0
353 + cmpldi cr0,r4,0
354 crclr cr0*4+so
355 beqlr
356 li r0,__NR_syscalls
357 diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
358 index a76b4af37ef2..382021324883 100644
359 --- a/arch/powerpc/kernel/vdso64/gettimeofday.S
360 +++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
361 @@ -145,7 +145,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
362 bne cr0,99f
363
364 li r3,0
365 - cmpli cr0,r4,0
366 + cmpldi cr0,r4,0
367 crclr cr0*4+so
368 beqlr
369 lis r5,CLOCK_REALTIME_RES@h
370 diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
371 index f09899e35991..7b22624f332c 100644
372 --- a/arch/powerpc/lib/copyuser_64.S
373 +++ b/arch/powerpc/lib/copyuser_64.S
374 @@ -359,6 +359,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
375 addi r3,r3,8
376 171:
377 177:
378 +179:
379 addi r3,r3,8
380 370:
381 372:
382 @@ -373,7 +374,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
383 173:
384 174:
385 175:
386 -179:
387 181:
388 184:
389 186:
390 diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
391 index 6527882ce05e..ddfd2740a1b5 100644
392 --- a/arch/powerpc/mm/copro_fault.c
393 +++ b/arch/powerpc/mm/copro_fault.c
394 @@ -106,6 +106,8 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
395 switch (REGION_ID(ea)) {
396 case USER_REGION_ID:
397 pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
398 + if (mm == NULL)
399 + return 1;
400 psize = get_slice_psize(mm, ea);
401 ssize = user_segment_size(ea);
402 vsid = get_vsid(mm->context.id, ea, ssize);
403 diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
404 index 2ba602591a20..ba0cae69a396 100644
405 --- a/arch/powerpc/platforms/powernv/eeh-powernv.c
406 +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
407 @@ -1163,7 +1163,7 @@ static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose)
408 return;
409 }
410
411 - switch (data->type) {
412 + switch (be16_to_cpu(data->type)) {
413 case OPAL_P7IOC_DIAG_TYPE_RGC:
414 pr_info("P7IOC diag-data for RGC\n\n");
415 pnv_eeh_dump_hub_diag_common(data);
416 @@ -1395,7 +1395,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
417
418 /* Try best to clear it */
419 opal_pci_eeh_freeze_clear(phb->opal_id,
420 - frozen_pe_no,
421 + be64_to_cpu(frozen_pe_no),
422 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
423 ret = EEH_NEXT_ERR_NONE;
424 } else if ((*pe)->state & EEH_PE_ISOLATED ||
425 diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
426 index ad8c3f4a5e0b..dd5e0f3b1b5d 100644
427 --- a/arch/powerpc/platforms/powernv/pci.c
428 +++ b/arch/powerpc/platforms/powernv/pci.c
429 @@ -197,8 +197,8 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
430 be64_to_cpu(data->dma1ErrorLog1));
431
432 for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
433 - if ((data->pestA[i] >> 63) == 0 &&
434 - (data->pestB[i] >> 63) == 0)
435 + if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
436 + (be64_to_cpu(data->pestB[i]) >> 63) == 0)
437 continue;
438
439 pr_info("PE[%3d] A/B: %016llx %016llx\n",
440 diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
441 index b7a67e3d2201..3ae43282460e 100644
442 --- a/arch/powerpc/platforms/pseries/lpar.c
443 +++ b/arch/powerpc/platforms/pseries/lpar.c
444 @@ -406,7 +406,7 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
445 unsigned long *vpn, int count,
446 int psize, int ssize)
447 {
448 - unsigned long param[8];
449 + unsigned long param[PLPAR_HCALL9_BUFSIZE];
450 int i = 0, pix = 0, rc;
451 unsigned long flags = 0;
452 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
453 @@ -523,7 +523,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
454 unsigned long flags = 0;
455 struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
456 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
457 - unsigned long param[9];
458 + unsigned long param[PLPAR_HCALL9_BUFSIZE];
459 unsigned long hash, index, shift, hidx, slot;
460 real_pte_t pte;
461 int psize, ssize;
462 diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
463 index a2e6ef32e054..0a2031618f7f 100644
464 --- a/arch/s390/include/asm/tlbflush.h
465 +++ b/arch/s390/include/asm/tlbflush.h
466 @@ -81,7 +81,8 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
467 }
468
469 /*
470 - * Flush TLB entries for a specific ASCE on all CPUs.
471 + * Flush TLB entries for a specific ASCE on all CPUs. Should never be used
472 + * when more than one asce (e.g. gmap) ran on this mm.
473 */
474 static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
475 {
476 diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
477 index 471a370a527b..8345ae1f117d 100644
478 --- a/arch/s390/mm/pgtable.c
479 +++ b/arch/s390/mm/pgtable.c
480 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(gmap_alloc);
481 static void gmap_flush_tlb(struct gmap *gmap)
482 {
483 if (MACHINE_HAS_IDTE)
484 - __tlb_flush_asce(gmap->mm, gmap->asce);
485 + __tlb_flush_idte(gmap->asce);
486 else
487 __tlb_flush_global();
488 }
489 @@ -205,7 +205,7 @@ void gmap_free(struct gmap *gmap)
490
491 /* Flush tlb. */
492 if (MACHINE_HAS_IDTE)
493 - __tlb_flush_asce(gmap->mm, gmap->asce);
494 + __tlb_flush_idte(gmap->asce);
495 else
496 __tlb_flush_global();
497
498 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
499 index 38b3ead7222d..52a2526c3fbe 100644
500 --- a/arch/x86/kernel/e820.c
501 +++ b/arch/x86/kernel/e820.c
502 @@ -347,7 +347,7 @@ int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
503 * continue building up new bios map based on this
504 * information
505 */
506 - if (current_type != last_type) {
507 + if (current_type != last_type || current_type == E820_PRAM) {
508 if (last_type != 0) {
509 new_bios[new_bios_entry].size =
510 change_point[chgidx]->addr - last_addr;
511 diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
512 index 88d0a92d3f94..3aab53f8cad2 100644
513 --- a/arch/x86/kvm/ioapic.c
514 +++ b/arch/x86/kvm/ioapic.c
515 @@ -580,7 +580,7 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
516 ioapic->irr = 0;
517 ioapic->irr_delivered = 0;
518 ioapic->id = 0;
519 - memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
520 + memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
521 rtc_irq_eoi_tracking_reset(ioapic);
522 }
523
524 diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
525 index 5a37188b559f..9d359e05fad7 100644
526 --- a/block/blk-cgroup.c
527 +++ b/block/blk-cgroup.c
528 @@ -1331,10 +1331,8 @@ int blkcg_policy_register(struct blkcg_policy *pol)
529 struct blkcg_policy_data *cpd;
530
531 cpd = pol->cpd_alloc_fn(GFP_KERNEL);
532 - if (!cpd) {
533 - mutex_unlock(&blkcg_pol_mutex);
534 + if (!cpd)
535 goto err_free_cpds;
536 - }
537
538 blkcg->cpd[pol->plid] = cpd;
539 cpd->blkcg = blkcg;
540 diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
541 index 758acabf2d81..8f3056cd0399 100644
542 --- a/crypto/asymmetric_keys/pkcs7_parser.c
543 +++ b/crypto/asymmetric_keys/pkcs7_parser.c
544 @@ -547,9 +547,7 @@ int pkcs7_sig_note_set_of_authattrs(void *context, size_t hdrlen,
545 struct pkcs7_signed_info *sinfo = ctx->sinfo;
546
547 if (!test_bit(sinfo_has_content_type, &sinfo->aa_set) ||
548 - !test_bit(sinfo_has_message_digest, &sinfo->aa_set) ||
549 - (ctx->msg->data_type == OID_msIndirectData &&
550 - !test_bit(sinfo_has_ms_opus_info, &sinfo->aa_set))) {
551 + !test_bit(sinfo_has_message_digest, &sinfo->aa_set)) {
552 pr_warn("Missing required AuthAttr\n");
553 return -EBADMSG;
554 }
555 diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
556 index 5230e8449d30..c097f477c74c 100644
557 --- a/drivers/acpi/nfit.c
558 +++ b/drivers/acpi/nfit.c
559 @@ -1806,6 +1806,9 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
560
561 dev_dbg(dev, "%s: event: %d\n", __func__, event);
562
563 + if (event != NFIT_NOTIFY_UPDATE)
564 + return;
565 +
566 device_lock(dev);
567 if (!dev->driver) {
568 /* dev->driver may be null if we're being removed */
569 diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
570 index 3d549a383659..13d6ec1ff055 100644
571 --- a/drivers/acpi/nfit.h
572 +++ b/drivers/acpi/nfit.h
573 @@ -45,6 +45,10 @@ enum {
574 ND_BLK_DCR_LATCH = 2,
575 };
576
577 +enum nfit_root_notifiers {
578 + NFIT_NOTIFY_UPDATE = 0x80,
579 +};
580 +
581 struct nfit_spa {
582 struct acpi_nfit_system_address *spa;
583 struct list_head list;
584 diff --git a/drivers/base/platform.c b/drivers/base/platform.c
585 index 176b59f5bc47..ba66330cea67 100644
586 --- a/drivers/base/platform.c
587 +++ b/drivers/base/platform.c
588 @@ -96,7 +96,7 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
589 int ret;
590
591 ret = of_irq_get(dev->dev.of_node, num);
592 - if (ret >= 0 || ret == -EPROBE_DEFER)
593 + if (ret > 0 || ret == -EPROBE_DEFER)
594 return ret;
595 }
596
597 @@ -154,7 +154,7 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
598 int ret;
599
600 ret = of_irq_get_byname(dev->dev.of_node, name);
601 - if (ret >= 0 || ret == -EPROBE_DEFER)
602 + if (ret > 0 || ret == -EPROBE_DEFER)
603 return ret;
604 }
605
606 diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
607 index c1935081d34a..aab64205d866 100644
608 --- a/drivers/clk/imx/clk-imx6q.c
609 +++ b/drivers/clk/imx/clk-imx6q.c
610 @@ -550,6 +550,24 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
611 if (IS_ENABLED(CONFIG_PCI_IMX6))
612 clk_set_parent(clk[IMX6QDL_CLK_LVDS1_SEL], clk[IMX6QDL_CLK_SATA_REF_100M]);
613
614 + /*
615 + * Initialize the GPU clock muxes, so that the maximum specified clock
616 + * rates for the respective SoC are not exceeded.
617 + */
618 + if (clk_on_imx6dl()) {
619 + clk_set_parent(clk[IMX6QDL_CLK_GPU3D_CORE_SEL],
620 + clk[IMX6QDL_CLK_PLL2_PFD1_594M]);
621 + clk_set_parent(clk[IMX6QDL_CLK_GPU2D_CORE_SEL],
622 + clk[IMX6QDL_CLK_PLL2_PFD1_594M]);
623 + } else if (clk_on_imx6q()) {
624 + clk_set_parent(clk[IMX6QDL_CLK_GPU3D_CORE_SEL],
625 + clk[IMX6QDL_CLK_MMDC_CH0_AXI]);
626 + clk_set_parent(clk[IMX6QDL_CLK_GPU3D_SHADER_SEL],
627 + clk[IMX6QDL_CLK_PLL2_PFD1_594M]);
628 + clk_set_parent(clk[IMX6QDL_CLK_GPU2D_CORE_SEL],
629 + clk[IMX6QDL_CLK_PLL3_USB_OTG]);
630 + }
631 +
632 imx_register_uart_clocks(uart_clks);
633 }
634 CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);
635 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
636 index 6e80e4298274..7ff8b15a3422 100644
637 --- a/drivers/cpufreq/intel_pstate.c
638 +++ b/drivers/cpufreq/intel_pstate.c
639 @@ -285,14 +285,14 @@ static void intel_pstate_hwp_set(void)
640 int min, hw_min, max, hw_max, cpu, range, adj_range;
641 u64 value, cap;
642
643 - rdmsrl(MSR_HWP_CAPABILITIES, cap);
644 - hw_min = HWP_LOWEST_PERF(cap);
645 - hw_max = HWP_HIGHEST_PERF(cap);
646 - range = hw_max - hw_min;
647 -
648 get_online_cpus();
649
650 for_each_online_cpu(cpu) {
651 + rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
652 + hw_min = HWP_LOWEST_PERF(cap);
653 + hw_max = HWP_HIGHEST_PERF(cap);
654 + range = hw_max - hw_min;
655 +
656 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
657 adj_range = limits->min_perf_pct * range / 100;
658 min = hw_min + adj_range;
659 diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
660 index 48ef368347ab..9e02cb6afb0b 100644
661 --- a/drivers/gpio/gpio-mpc8xxx.c
662 +++ b/drivers/gpio/gpio-mpc8xxx.c
663 @@ -329,7 +329,7 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
664 irq_hw_number_t hwirq)
665 {
666 irq_set_chip_data(irq, h->host_data);
667 - irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq);
668 + irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_edge_irq);
669
670 return 0;
671 }
672 diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
673 index be5b399da5d3..43482ae1e049 100644
674 --- a/drivers/input/mouse/elantech.c
675 +++ b/drivers/input/mouse/elantech.c
676 @@ -1163,6 +1163,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
677 DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
678 },
679 },
680 + {
681 + /* Fujitsu H760 also has a middle button */
682 + .matches = {
683 + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
684 + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
685 + },
686 + },
687 #endif
688 { }
689 };
690 @@ -1507,10 +1514,10 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
691 },
692 },
693 {
694 - /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
695 + /* Fujitsu H760 does not work with crc_enabled == 0 */
696 .matches = {
697 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
698 - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
699 + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
700 },
701 },
702 {
703 @@ -1521,6 +1528,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
704 },
705 },
706 {
707 + /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
708 + .matches = {
709 + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
710 + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
711 + },
712 + },
713 + {
714 + /* Fujitsu LIFEBOOK E556 does not work with crc_enabled == 0 */
715 + .matches = {
716 + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
717 + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"),
718 + },
719 + },
720 + {
721 /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
722 .matches = {
723 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
724 diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h
725 index a5eed2ade53d..34da81c006b6 100644
726 --- a/drivers/input/serio/i8042-io.h
727 +++ b/drivers/input/serio/i8042-io.h
728 @@ -81,7 +81,7 @@ static inline int i8042_platform_init(void)
729 return -EBUSY;
730 #endif
731
732 - i8042_reset = 1;
733 + i8042_reset = I8042_RESET_ALWAYS;
734 return 0;
735 }
736
737 diff --git a/drivers/input/serio/i8042-ip22io.h b/drivers/input/serio/i8042-ip22io.h
738 index ee1ad27d6ed0..08a1c10a1448 100644
739 --- a/drivers/input/serio/i8042-ip22io.h
740 +++ b/drivers/input/serio/i8042-ip22io.h
741 @@ -61,7 +61,7 @@ static inline int i8042_platform_init(void)
742 return -EBUSY;
743 #endif
744
745 - i8042_reset = 1;
746 + i8042_reset = I8042_RESET_ALWAYS;
747
748 return 0;
749 }
750 diff --git a/drivers/input/serio/i8042-ppcio.h b/drivers/input/serio/i8042-ppcio.h
751 index f708c75d16f1..1aabea43329e 100644
752 --- a/drivers/input/serio/i8042-ppcio.h
753 +++ b/drivers/input/serio/i8042-ppcio.h
754 @@ -44,7 +44,7 @@ static inline void i8042_write_command(int val)
755
756 static inline int i8042_platform_init(void)
757 {
758 - i8042_reset = 1;
759 + i8042_reset = I8042_RESET_ALWAYS;
760 return 0;
761 }
762
763 diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
764 index afcd1c1a05b2..6231d63860ee 100644
765 --- a/drivers/input/serio/i8042-sparcio.h
766 +++ b/drivers/input/serio/i8042-sparcio.h
767 @@ -130,7 +130,7 @@ static int __init i8042_platform_init(void)
768 }
769 }
770
771 - i8042_reset = 1;
772 + i8042_reset = I8042_RESET_ALWAYS;
773
774 return 0;
775 }
776 diff --git a/drivers/input/serio/i8042-unicore32io.h b/drivers/input/serio/i8042-unicore32io.h
777 index 73f5cc124a36..455747552f85 100644
778 --- a/drivers/input/serio/i8042-unicore32io.h
779 +++ b/drivers/input/serio/i8042-unicore32io.h
780 @@ -61,7 +61,7 @@ static inline int i8042_platform_init(void)
781 if (!request_mem_region(I8042_REGION_START, I8042_REGION_SIZE, "i8042"))
782 return -EBUSY;
783
784 - i8042_reset = 1;
785 + i8042_reset = I8042_RESET_ALWAYS;
786 return 0;
787 }
788
789 diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
790 index 68f5f4a0f1e7..f4bfb4b2d50a 100644
791 --- a/drivers/input/serio/i8042-x86ia64io.h
792 +++ b/drivers/input/serio/i8042-x86ia64io.h
793 @@ -510,6 +510,90 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
794 { }
795 };
796
797 +/*
798 + * On some Asus laptops, just running self tests cause problems.
799 + */
800 +static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
801 + {
802 + .matches = {
803 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
804 + DMI_MATCH(DMI_PRODUCT_NAME, "A455LD"),
805 + },
806 + },
807 + {
808 + .matches = {
809 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
810 + DMI_MATCH(DMI_PRODUCT_NAME, "K401LB"),
811 + },
812 + },
813 + {
814 + .matches = {
815 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
816 + DMI_MATCH(DMI_PRODUCT_NAME, "K501LB"),
817 + },
818 + },
819 + {
820 + .matches = {
821 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
822 + DMI_MATCH(DMI_PRODUCT_NAME, "K501LX"),
823 + },
824 + },
825 + {
826 + .matches = {
827 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
828 + DMI_MATCH(DMI_PRODUCT_NAME, "R409L"),
829 + },
830 + },
831 + {
832 + .matches = {
833 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
834 + DMI_MATCH(DMI_PRODUCT_NAME, "V502LX"),
835 + },
836 + },
837 + {
838 + .matches = {
839 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
840 + DMI_MATCH(DMI_PRODUCT_NAME, "X302LA"),
841 + },
842 + },
843 + {
844 + .matches = {
845 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
846 + DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
847 + },
848 + },
849 + {
850 + .matches = {
851 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
852 + DMI_MATCH(DMI_PRODUCT_NAME, "X450LD"),
853 + },
854 + },
855 + {
856 + .matches = {
857 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
858 + DMI_MATCH(DMI_PRODUCT_NAME, "X455LAB"),
859 + },
860 + },
861 + {
862 + .matches = {
863 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
864 + DMI_MATCH(DMI_PRODUCT_NAME, "X455LDB"),
865 + },
866 + },
867 + {
868 + .matches = {
869 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
870 + DMI_MATCH(DMI_PRODUCT_NAME, "X455LF"),
871 + },
872 + },
873 + {
874 + .matches = {
875 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
876 + DMI_MATCH(DMI_PRODUCT_NAME, "Z450LA"),
877 + },
878 + },
879 + { }
880 +};
881 static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
882 {
883 /* MSI Wind U-100 */
884 @@ -1072,12 +1156,18 @@ static int __init i8042_platform_init(void)
885 return retval;
886
887 #if defined(__ia64__)
888 - i8042_reset = true;
889 + i8042_reset = I8042_RESET_ALWAYS;
890 #endif
891
892 #ifdef CONFIG_X86
893 - if (dmi_check_system(i8042_dmi_reset_table))
894 - i8042_reset = true;
895 + /* Honor module parameter when value is not default */
896 + if (i8042_reset == I8042_RESET_DEFAULT) {
897 + if (dmi_check_system(i8042_dmi_reset_table))
898 + i8042_reset = I8042_RESET_ALWAYS;
899 +
900 + if (dmi_check_system(i8042_dmi_noselftest_table))
901 + i8042_reset = I8042_RESET_NEVER;
902 + }
903
904 if (dmi_check_system(i8042_dmi_noloop_table))
905 i8042_noloop = true;
906 diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
907 index 405252a884dd..89abfdb539ac 100644
908 --- a/drivers/input/serio/i8042.c
909 +++ b/drivers/input/serio/i8042.c
910 @@ -48,9 +48,39 @@ static bool i8042_unlock;
911 module_param_named(unlock, i8042_unlock, bool, 0);
912 MODULE_PARM_DESC(unlock, "Ignore keyboard lock.");
913
914 -static bool i8042_reset;
915 -module_param_named(reset, i8042_reset, bool, 0);
916 -MODULE_PARM_DESC(reset, "Reset controller during init and cleanup.");
917 +enum i8042_controller_reset_mode {
918 + I8042_RESET_NEVER,
919 + I8042_RESET_ALWAYS,
920 + I8042_RESET_ON_S2RAM,
921 +#define I8042_RESET_DEFAULT I8042_RESET_ON_S2RAM
922 +};
923 +static enum i8042_controller_reset_mode i8042_reset = I8042_RESET_DEFAULT;
924 +static int i8042_set_reset(const char *val, const struct kernel_param *kp)
925 +{
926 + enum i8042_controller_reset_mode *arg = kp->arg;
927 + int error;
928 + bool reset;
929 +
930 + if (val) {
931 + error = kstrtobool(val, &reset);
932 + if (error)
933 + return error;
934 + } else {
935 + reset = true;
936 + }
937 +
938 + *arg = reset ? I8042_RESET_ALWAYS : I8042_RESET_NEVER;
939 + return 0;
940 +}
941 +
942 +static const struct kernel_param_ops param_ops_reset_param = {
943 + .flags = KERNEL_PARAM_OPS_FL_NOARG,
944 + .set = i8042_set_reset,
945 +};
946 +#define param_check_reset_param(name, p) \
947 + __param_check(name, p, enum i8042_controller_reset_mode)
948 +module_param_named(reset, i8042_reset, reset_param, 0);
949 +MODULE_PARM_DESC(reset, "Reset controller on resume, cleanup or both");
950
951 static bool i8042_direct;
952 module_param_named(direct, i8042_direct, bool, 0);
953 @@ -1019,7 +1049,7 @@ static int i8042_controller_init(void)
954 * Reset the controller and reset CRT to the original value set by BIOS.
955 */
956
957 -static void i8042_controller_reset(bool force_reset)
958 +static void i8042_controller_reset(bool s2r_wants_reset)
959 {
960 i8042_flush();
961
962 @@ -1044,8 +1074,10 @@ static void i8042_controller_reset(bool force_reset)
963 * Reset the controller if requested.
964 */
965
966 - if (i8042_reset || force_reset)
967 + if (i8042_reset == I8042_RESET_ALWAYS ||
968 + (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) {
969 i8042_controller_selftest();
970 + }
971
972 /*
973 * Restore the original control register setting.
974 @@ -1110,7 +1142,7 @@ static void i8042_dritek_enable(void)
975 * before suspending.
976 */
977
978 -static int i8042_controller_resume(bool force_reset)
979 +static int i8042_controller_resume(bool s2r_wants_reset)
980 {
981 int error;
982
983 @@ -1118,7 +1150,8 @@ static int i8042_controller_resume(bool force_reset)
984 if (error)
985 return error;
986
987 - if (i8042_reset || force_reset) {
988 + if (i8042_reset == I8042_RESET_ALWAYS ||
989 + (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) {
990 error = i8042_controller_selftest();
991 if (error)
992 return error;
993 @@ -1195,7 +1228,7 @@ static int i8042_pm_resume_noirq(struct device *dev)
994
995 static int i8042_pm_resume(struct device *dev)
996 {
997 - bool force_reset;
998 + bool want_reset;
999 int i;
1000
1001 for (i = 0; i < I8042_NUM_PORTS; i++) {
1002 @@ -1218,9 +1251,9 @@ static int i8042_pm_resume(struct device *dev)
1003 * off control to the platform firmware, otherwise we can simply restore
1004 * the mode.
1005 */
1006 - force_reset = pm_resume_via_firmware();
1007 + want_reset = pm_resume_via_firmware();
1008
1009 - return i8042_controller_resume(force_reset);
1010 + return i8042_controller_resume(want_reset);
1011 }
1012
1013 static int i8042_pm_thaw(struct device *dev)
1014 @@ -1482,7 +1515,7 @@ static int __init i8042_probe(struct platform_device *dev)
1015
1016 i8042_platform_device = dev;
1017
1018 - if (i8042_reset) {
1019 + if (i8042_reset == I8042_RESET_ALWAYS) {
1020 error = i8042_controller_selftest();
1021 if (error)
1022 return error;
1023 diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
1024 index 44aa57edf207..e33c729b9f48 100644
1025 --- a/drivers/irqchip/irq-gic-v3.c
1026 +++ b/drivers/irqchip/irq-gic-v3.c
1027 @@ -142,7 +142,7 @@ static void gic_enable_redist(bool enable)
1028 return; /* No PM support in this redistributor */
1029 }
1030
1031 - while (count--) {
1032 + while (--count) {
1033 val = readl_relaxed(rbase + GICR_WAKER);
1034 if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
1035 break;
1036 diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
1037 index 51eda7235e32..5cac11d7a876 100644
1038 --- a/drivers/md/dm-crypt.c
1039 +++ b/drivers/md/dm-crypt.c
1040 @@ -112,8 +112,7 @@ struct iv_tcw_private {
1041 * and encrypts / decrypts at the same time.
1042 */
1043 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
1044 - DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
1045 - DM_CRYPT_EXIT_THREAD};
1046 + DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
1047
1048 /*
1049 * The fields in here must be read only after initialization.
1050 @@ -1204,18 +1203,20 @@ continue_locked:
1051 if (!RB_EMPTY_ROOT(&cc->write_tree))
1052 goto pop_from_list;
1053
1054 - if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) {
1055 - spin_unlock_irq(&cc->write_thread_wait.lock);
1056 - break;
1057 - }
1058 -
1059 - __set_current_state(TASK_INTERRUPTIBLE);
1060 + set_current_state(TASK_INTERRUPTIBLE);
1061 __add_wait_queue(&cc->write_thread_wait, &wait);
1062
1063 spin_unlock_irq(&cc->write_thread_wait.lock);
1064
1065 + if (unlikely(kthread_should_stop())) {
1066 + set_task_state(current, TASK_RUNNING);
1067 + remove_wait_queue(&cc->write_thread_wait, &wait);
1068 + break;
1069 + }
1070 +
1071 schedule();
1072
1073 + set_task_state(current, TASK_RUNNING);
1074 spin_lock_irq(&cc->write_thread_wait.lock);
1075 __remove_wait_queue(&cc->write_thread_wait, &wait);
1076 goto continue_locked;
1077 @@ -1530,13 +1531,8 @@ static void crypt_dtr(struct dm_target *ti)
1078 if (!cc)
1079 return;
1080
1081 - if (cc->write_thread) {
1082 - spin_lock_irq(&cc->write_thread_wait.lock);
1083 - set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags);
1084 - wake_up_locked(&cc->write_thread_wait);
1085 - spin_unlock_irq(&cc->write_thread_wait.lock);
1086 + if (cc->write_thread)
1087 kthread_stop(cc->write_thread);
1088 - }
1089
1090 if (cc->io_queue)
1091 destroy_workqueue(cc->io_queue);
1092 diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
1093 index cfa29f574c2a..5b2ef966012b 100644
1094 --- a/drivers/md/dm-mpath.c
1095 +++ b/drivers/md/dm-mpath.c
1096 @@ -1220,10 +1220,10 @@ static void activate_path(struct work_struct *work)
1097 {
1098 struct pgpath *pgpath =
1099 container_of(work, struct pgpath, activate_path.work);
1100 + struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1101
1102 - if (pgpath->is_active)
1103 - scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
1104 - pg_init_done, pgpath);
1105 + if (pgpath->is_active && !blk_queue_dying(q))
1106 + scsi_dh_activate(q, pg_init_done, pgpath);
1107 else
1108 pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1109 }
1110 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
1111 index a42729ebf272..84aa8b1d0480 100644
1112 --- a/drivers/md/dm.c
1113 +++ b/drivers/md/dm.c
1114 @@ -2869,6 +2869,7 @@ EXPORT_SYMBOL_GPL(dm_device_name);
1115
1116 static void __dm_destroy(struct mapped_device *md, bool wait)
1117 {
1118 + struct request_queue *q = dm_get_md_queue(md);
1119 struct dm_table *map;
1120 int srcu_idx;
1121
1122 @@ -2879,6 +2880,10 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
1123 set_bit(DMF_FREEING, &md->flags);
1124 spin_unlock(&_minor_lock);
1125
1126 + spin_lock_irq(q->queue_lock);
1127 + queue_flag_set(QUEUE_FLAG_DYING, q);
1128 + spin_unlock_irq(q->queue_lock);
1129 +
1130 if (dm_request_based(md) && md->kworker_task)
1131 flush_kthread_worker(&md->kworker);
1132
1133 @@ -3245,10 +3250,11 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
1134
1135 int dm_resume(struct mapped_device *md)
1136 {
1137 - int r = -EINVAL;
1138 + int r;
1139 struct dm_table *map = NULL;
1140
1141 retry:
1142 + r = -EINVAL;
1143 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
1144
1145 if (!dm_suspended_md(md))
1146 @@ -3272,8 +3278,6 @@ retry:
1147 goto out;
1148
1149 clear_bit(DMF_SUSPENDED, &md->flags);
1150 -
1151 - r = 0;
1152 out:
1153 mutex_unlock(&md->suspend_lock);
1154
1155 diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
1156 index cfc005ee11d8..7fc72de2434c 100644
1157 --- a/drivers/media/dvb-frontends/mb86a20s.c
1158 +++ b/drivers/media/dvb-frontends/mb86a20s.c
1159 @@ -71,25 +71,27 @@ static struct regdata mb86a20s_init1[] = {
1160 };
1161
1162 static struct regdata mb86a20s_init2[] = {
1163 - { 0x28, 0x22 }, { 0x29, 0x00 }, { 0x2a, 0x1f }, { 0x2b, 0xf0 },
1164 + { 0x50, 0xd1 }, { 0x51, 0x22 },
1165 + { 0x39, 0x01 },
1166 + { 0x71, 0x00 },
1167 { 0x3b, 0x21 },
1168 - { 0x3c, 0x38 },
1169 + { 0x3c, 0x3a },
1170 { 0x01, 0x0d },
1171 - { 0x04, 0x08 }, { 0x05, 0x03 },
1172 + { 0x04, 0x08 }, { 0x05, 0x05 },
1173 { 0x04, 0x0e }, { 0x05, 0x00 },
1174 - { 0x04, 0x0f }, { 0x05, 0x37 },
1175 - { 0x04, 0x0b }, { 0x05, 0x78 },
1176 + { 0x04, 0x0f }, { 0x05, 0x14 },
1177 + { 0x04, 0x0b }, { 0x05, 0x8c },
1178 { 0x04, 0x00 }, { 0x05, 0x00 },
1179 - { 0x04, 0x01 }, { 0x05, 0x1e },
1180 - { 0x04, 0x02 }, { 0x05, 0x07 },
1181 - { 0x04, 0x03 }, { 0x05, 0xd0 },
1182 + { 0x04, 0x01 }, { 0x05, 0x07 },
1183 + { 0x04, 0x02 }, { 0x05, 0x0f },
1184 + { 0x04, 0x03 }, { 0x05, 0xa0 },
1185 { 0x04, 0x09 }, { 0x05, 0x00 },
1186 { 0x04, 0x0a }, { 0x05, 0xff },
1187 - { 0x04, 0x27 }, { 0x05, 0x00 },
1188 + { 0x04, 0x27 }, { 0x05, 0x64 },
1189 { 0x04, 0x28 }, { 0x05, 0x00 },
1190 - { 0x04, 0x1e }, { 0x05, 0x00 },
1191 - { 0x04, 0x29 }, { 0x05, 0x64 },
1192 - { 0x04, 0x32 }, { 0x05, 0x02 },
1193 + { 0x04, 0x1e }, { 0x05, 0xff },
1194 + { 0x04, 0x29 }, { 0x05, 0x0a },
1195 + { 0x04, 0x32 }, { 0x05, 0x0a },
1196 { 0x04, 0x14 }, { 0x05, 0x02 },
1197 { 0x04, 0x04 }, { 0x05, 0x00 },
1198 { 0x04, 0x05 }, { 0x05, 0x22 },
1199 @@ -97,8 +99,6 @@ static struct regdata mb86a20s_init2[] = {
1200 { 0x04, 0x07 }, { 0x05, 0xd8 },
1201 { 0x04, 0x12 }, { 0x05, 0x00 },
1202 { 0x04, 0x13 }, { 0x05, 0xff },
1203 - { 0x04, 0x15 }, { 0x05, 0x4e },
1204 - { 0x04, 0x16 }, { 0x05, 0x20 },
1205
1206 /*
1207 * On this demod, when the bit count reaches the count below,
1208 @@ -152,42 +152,36 @@ static struct regdata mb86a20s_init2[] = {
1209 { 0x50, 0x51 }, { 0x51, 0x04 }, /* MER symbol 4 */
1210 { 0x45, 0x04 }, /* CN symbol 4 */
1211 { 0x48, 0x04 }, /* CN manual mode */
1212 -
1213 + { 0x50, 0xd5 }, { 0x51, 0x01 },
1214 { 0x50, 0xd6 }, { 0x51, 0x1f },
1215 { 0x50, 0xd2 }, { 0x51, 0x03 },
1216 - { 0x50, 0xd7 }, { 0x51, 0xbf },
1217 - { 0x28, 0x74 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xff },
1218 - { 0x28, 0x46 }, { 0x29, 0x00 }, { 0x2a, 0x1a }, { 0x2b, 0x0c },
1219 -
1220 - { 0x04, 0x40 }, { 0x05, 0x00 },
1221 - { 0x28, 0x00 }, { 0x2b, 0x08 },
1222 - { 0x28, 0x05 }, { 0x2b, 0x00 },
1223 + { 0x50, 0xd7 }, { 0x51, 0x3f },
1224 { 0x1c, 0x01 },
1225 - { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x1f },
1226 - { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x18 },
1227 - { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x12 },
1228 - { 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x30 },
1229 - { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x37 },
1230 - { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 },
1231 - { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x09 },
1232 - { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x06 },
1233 - { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7b },
1234 - { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x76 },
1235 - { 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7d },
1236 - { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x08 },
1237 - { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0b },
1238 - { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 },
1239 - { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf2 },
1240 - { 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf3 },
1241 - { 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x05 },
1242 - { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 },
1243 - { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f },
1244 - { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xef },
1245 - { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xd8 },
1246 - { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xf1 },
1247 - { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x3d },
1248 - { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x94 },
1249 - { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xba },
1250 + { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x03 },
1251 + { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0d },
1252 + { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 },
1253 + { 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x01 },
1254 + { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x21 },
1255 + { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x29 },
1256 + { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 },
1257 + { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x31 },
1258 + { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0e },
1259 + { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x4e },
1260 + { 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x46 },
1261 + { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f },
1262 + { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x56 },
1263 + { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x35 },
1264 + { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbe },
1265 + { 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0x84 },
1266 + { 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x03 }, { 0x2b, 0xee },
1267 + { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x98 },
1268 + { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x9f },
1269 + { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xb2 },
1270 + { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0xc2 },
1271 + { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0x4a },
1272 + { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbc },
1273 + { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x04 }, { 0x2b, 0xba },
1274 + { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0x14 },
1275 { 0x50, 0x1e }, { 0x51, 0x5d },
1276 { 0x50, 0x22 }, { 0x51, 0x00 },
1277 { 0x50, 0x23 }, { 0x51, 0xc8 },
1278 @@ -196,9 +190,7 @@ static struct regdata mb86a20s_init2[] = {
1279 { 0x50, 0x26 }, { 0x51, 0x00 },
1280 { 0x50, 0x27 }, { 0x51, 0xc3 },
1281 { 0x50, 0x39 }, { 0x51, 0x02 },
1282 - { 0xec, 0x0f },
1283 - { 0xeb, 0x1f },
1284 - { 0x28, 0x6a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 },
1285 + { 0x50, 0xd5 }, { 0x51, 0x01 },
1286 { 0xd0, 0x00 },
1287 };
1288
1289 @@ -317,7 +309,11 @@ static int mb86a20s_read_status(struct dvb_frontend *fe, enum fe_status *status)
1290 if (val >= 7)
1291 *status |= FE_HAS_SYNC;
1292
1293 - if (val >= 8) /* Maybe 9? */
1294 + /*
1295 + * Actually, on state S8, it starts receiving TS, but the TS
1296 + * output is only on normal state after the transition to S9.
1297 + */
1298 + if (val >= 9)
1299 *status |= FE_HAS_LOCK;
1300
1301 dev_dbg(&state->i2c->dev, "%s: Status = 0x%02x (state = %d)\n",
1302 @@ -2067,6 +2063,11 @@ static void mb86a20s_release(struct dvb_frontend *fe)
1303 kfree(state);
1304 }
1305
1306 +static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe)
1307 +{
1308 + return DVBFE_ALGO_HW;
1309 +}
1310 +
1311 static struct dvb_frontend_ops mb86a20s_ops;
1312
1313 struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config,
1314 @@ -2140,6 +2141,7 @@ static struct dvb_frontend_ops mb86a20s_ops = {
1315 .read_status = mb86a20s_read_status_and_stats,
1316 .read_signal_strength = mb86a20s_read_signal_strength_from_cache,
1317 .tune = mb86a20s_tune,
1318 + .get_frontend_algo = mb86a20s_get_frontend_algo,
1319 };
1320
1321 MODULE_DESCRIPTION("DVB Frontend module for Fujitsu mb86A20s hardware");
1322 diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c
1323 index 491913778bcc..2f52d66b4dae 100644
1324 --- a/drivers/media/usb/cx231xx/cx231xx-avcore.c
1325 +++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c
1326 @@ -1264,7 +1264,10 @@ int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev,
1327 dev->board.agc_analog_digital_select_gpio,
1328 analog_or_digital);
1329
1330 - return status;
1331 + if (status < 0)
1332 + return status;
1333 +
1334 + return 0;
1335 }
1336
1337 int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3)
1338 diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
1339 index 4a117a58c39a..8389c162bc89 100644
1340 --- a/drivers/media/usb/cx231xx/cx231xx-cards.c
1341 +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
1342 @@ -486,7 +486,7 @@ struct cx231xx_board cx231xx_boards[] = {
1343 .output_mode = OUT_MODE_VIP11,
1344 .demod_xfer_mode = 0,
1345 .ctl_pin_status_mask = 0xFFFFFFC4,
1346 - .agc_analog_digital_select_gpio = 0x00, /* According with PV cxPolaris.inf file */
1347 + .agc_analog_digital_select_gpio = 0x1c,
1348 .tuner_sif_gpio = -1,
1349 .tuner_scl_gpio = -1,
1350 .tuner_sda_gpio = -1,
1351 diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
1352 index a2fd49b6be83..19b0293312a0 100644
1353 --- a/drivers/media/usb/cx231xx/cx231xx-core.c
1354 +++ b/drivers/media/usb/cx231xx/cx231xx-core.c
1355 @@ -712,6 +712,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
1356 break;
1357 case CX231XX_BOARD_CNXT_RDE_253S:
1358 case CX231XX_BOARD_CNXT_RDU_253S:
1359 + case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
1360 errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1);
1361 break;
1362 case CX231XX_BOARD_HAUPPAUGE_EXETER:
1363 @@ -738,7 +739,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode)
1364 case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID:
1365 case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL:
1366 case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC:
1367 - errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
1368 + errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0);
1369 break;
1370 default:
1371 break;
1372 diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
1373 index 1105db2355d2..83bfb1659abe 100644
1374 --- a/drivers/memstick/host/rtsx_usb_ms.c
1375 +++ b/drivers/memstick/host/rtsx_usb_ms.c
1376 @@ -524,6 +524,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
1377 int rc;
1378
1379 if (!host->req) {
1380 + pm_runtime_get_sync(ms_dev(host));
1381 do {
1382 rc = memstick_next_req(msh, &host->req);
1383 dev_dbg(ms_dev(host), "next req %d\n", rc);
1384 @@ -544,6 +545,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
1385 host->req->error);
1386 }
1387 } while (!rc);
1388 + pm_runtime_put(ms_dev(host));
1389 }
1390
1391 }
1392 @@ -570,6 +572,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
1393 dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n",
1394 __func__, param, value);
1395
1396 + pm_runtime_get_sync(ms_dev(host));
1397 mutex_lock(&ucr->dev_mutex);
1398
1399 err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD);
1400 @@ -635,6 +638,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
1401 }
1402 out:
1403 mutex_unlock(&ucr->dev_mutex);
1404 + pm_runtime_put(ms_dev(host));
1405
1406 /* power-on delay */
1407 if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON)
1408 @@ -681,6 +685,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
1409 int err;
1410
1411 for (;;) {
1412 + pm_runtime_get_sync(ms_dev(host));
1413 mutex_lock(&ucr->dev_mutex);
1414
1415 /* Check pending MS card changes */
1416 @@ -703,6 +708,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
1417 }
1418
1419 poll_again:
1420 + pm_runtime_put(ms_dev(host));
1421 if (host->eject)
1422 break;
1423
1424 diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
1425 index a8a68acd3267..4e8069866c85 100644
1426 --- a/drivers/misc/mei/hw-me-regs.h
1427 +++ b/drivers/misc/mei/hw-me-regs.h
1428 @@ -66,6 +66,9 @@
1429 #ifndef _MEI_HW_MEI_REGS_H_
1430 #define _MEI_HW_MEI_REGS_H_
1431
1432 +#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
1433 +#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
1434 +
1435 /*
1436 * MEI device IDs
1437 */
1438 diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
1439 index 27678d8154e0..0af3d7d30419 100644
1440 --- a/drivers/misc/mei/pci-me.c
1441 +++ b/drivers/misc/mei/pci-me.c
1442 @@ -87,6 +87,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
1443 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)},
1444 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)},
1445
1446 + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, mei_me_pch8_cfg)},
1447 + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, mei_me_pch8_cfg)},
1448 +
1449 /* required last entry */
1450 {0, }
1451 };
1452 diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
1453 index 64950035613b..f2b733275a0a 100644
1454 --- a/drivers/mmc/card/block.c
1455 +++ b/drivers/mmc/card/block.c
1456 @@ -1755,7 +1755,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
1457 struct mmc_blk_data *md = mq->data;
1458 struct mmc_packed *packed = mqrq->packed;
1459 bool do_rel_wr, do_data_tag;
1460 - u32 *packed_cmd_hdr;
1461 + __le32 *packed_cmd_hdr;
1462 u8 hdr_blocks;
1463 u8 i = 1;
1464
1465 @@ -2279,7 +2279,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
1466 set_capacity(md->disk, size);
1467
1468 if (mmc_host_cmd23(card->host)) {
1469 - if (mmc_card_mmc(card) ||
1470 + if ((mmc_card_mmc(card) &&
1471 + card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
1472 (mmc_card_sd(card) &&
1473 card->scr.cmds & SD_SCR_CMD23_SUPPORT))
1474 md->flags |= MMC_BLK_CMD23;
1475 diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
1476 index 36cddab57d77..cf30b3712cb2 100644
1477 --- a/drivers/mmc/card/queue.h
1478 +++ b/drivers/mmc/card/queue.h
1479 @@ -25,7 +25,7 @@ enum mmc_packed_type {
1480
1481 struct mmc_packed {
1482 struct list_head list;
1483 - u32 cmd_hdr[1024];
1484 + __le32 cmd_hdr[1024];
1485 unsigned int blocks;
1486 u8 nr_entries;
1487 u8 retries;
1488 diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
1489 index 6c71fc9f76c7..da9f71b8deb0 100644
1490 --- a/drivers/mmc/host/rtsx_usb_sdmmc.c
1491 +++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
1492 @@ -1138,11 +1138,6 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1493 dev_dbg(sdmmc_dev(host), "%s\n", __func__);
1494 mutex_lock(&ucr->dev_mutex);
1495
1496 - if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD)) {
1497 - mutex_unlock(&ucr->dev_mutex);
1498 - return;
1499 - }
1500 -
1501 sd_set_power_mode(host, ios->power_mode);
1502 sd_set_bus_width(host, ios->bus_width);
1503 sd_set_timing(host, ios->timing, &host->ddr_mode);
1504 @@ -1314,6 +1309,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
1505 container_of(work, struct rtsx_usb_sdmmc, led_work);
1506 struct rtsx_ucr *ucr = host->ucr;
1507
1508 + pm_runtime_get_sync(sdmmc_dev(host));
1509 mutex_lock(&ucr->dev_mutex);
1510
1511 if (host->led.brightness == LED_OFF)
1512 @@ -1322,6 +1318,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
1513 rtsx_usb_turn_on_led(ucr);
1514
1515 mutex_unlock(&ucr->dev_mutex);
1516 + pm_runtime_put(sdmmc_dev(host));
1517 }
1518 #endif
1519
1520 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
1521 index 552a34dc4f82..64a428984afe 100644
1522 --- a/drivers/mmc/host/sdhci.c
1523 +++ b/drivers/mmc/host/sdhci.c
1524 @@ -675,7 +675,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1525 * host->clock is in Hz. target_timeout is in us.
1526 * Hence, us = 1000000 * cycles / Hz. Round up.
1527 */
1528 - val = 1000000 * data->timeout_clks;
1529 + val = 1000000ULL * data->timeout_clks;
1530 if (do_div(val, host->clock))
1531 target_timeout++;
1532 target_timeout += val;
1533 diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
1534 index 56065632a5b8..75286588b823 100644
1535 --- a/drivers/mtd/ubi/wl.c
1536 +++ b/drivers/mtd/ubi/wl.c
1537 @@ -643,7 +643,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1538 int shutdown)
1539 {
1540 int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
1541 - int vol_id = -1, lnum = -1;
1542 + int erase = 0, keep = 0, vol_id = -1, lnum = -1;
1543 #ifdef CONFIG_MTD_UBI_FASTMAP
1544 int anchor = wrk->anchor;
1545 #endif
1546 @@ -777,6 +777,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1547 e1->pnum);
1548 scrubbing = 1;
1549 goto out_not_moved;
1550 + } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
1551 + /*
1552 + * While a full scan would detect interrupted erasures
1553 + * at attach time we can face them here when attached from
1554 + * Fastmap.
1555 + */
1556 + dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
1557 + e1->pnum);
1558 + erase = 1;
1559 + goto out_not_moved;
1560 }
1561
1562 ubi_err(ubi, "error %d while reading VID header from PEB %d",
1563 @@ -810,6 +820,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1564 * Target PEB had bit-flips or write error - torture it.
1565 */
1566 torture = 1;
1567 + keep = 1;
1568 goto out_not_moved;
1569 }
1570
1571 @@ -895,7 +906,7 @@ out_not_moved:
1572 ubi->erroneous_peb_count += 1;
1573 } else if (scrubbing)
1574 wl_tree_add(e1, &ubi->scrub);
1575 - else
1576 + else if (keep)
1577 wl_tree_add(e1, &ubi->used);
1578 ubi_assert(!ubi->move_to_put);
1579 ubi->move_from = ubi->move_to = NULL;
1580 @@ -907,6 +918,12 @@ out_not_moved:
1581 if (err)
1582 goto out_ro;
1583
1584 + if (erase) {
1585 + err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
1586 + if (err)
1587 + goto out_ro;
1588 + }
1589 +
1590 mutex_unlock(&ubi->move_mutex);
1591 return 0;
1592
1593 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1594 index 2e611dc5f162..1c8123816745 100644
1595 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1596 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1597 @@ -14819,6 +14819,10 @@ static int bnx2x_get_fc_npiv(struct net_device *dev,
1598 }
1599
1600 offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
1601 + if (!offset) {
1602 + DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n");
1603 + goto out;
1604 + }
1605 DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
1606
1607 /* Read the table contents from nvram */
1608 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1609 index 67e9633ea9c7..232191417b93 100644
1610 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1611 +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
1612 @@ -2282,7 +2282,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
1613 struct mlx4_en_dev *mdev = en_priv->mdev;
1614 u64 mac_u64 = mlx4_mac_to_u64(mac);
1615
1616 - if (!is_valid_ether_addr(mac))
1617 + if (is_multicast_ether_addr(mac))
1618 return -EINVAL;
1619
1620 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
1621 diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
1622 index 5be34118e0af..f67e7e5b13e1 100644
1623 --- a/drivers/net/wireless/realtek/rtlwifi/regd.c
1624 +++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
1625 @@ -345,9 +345,9 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
1626 return &rtl_regdom_no_midband;
1627 case COUNTRY_CODE_IC:
1628 return &rtl_regdom_11;
1629 - case COUNTRY_CODE_ETSI:
1630 case COUNTRY_CODE_TELEC_NETGEAR:
1631 return &rtl_regdom_60_64;
1632 + case COUNTRY_CODE_ETSI:
1633 case COUNTRY_CODE_SPAIN:
1634 case COUNTRY_CODE_FRANCE:
1635 case COUNTRY_CODE_ISRAEL:
1636 @@ -406,6 +406,8 @@ static u8 channel_plan_to_country_code(u8 channelplan)
1637 return COUNTRY_CODE_WORLD_WIDE_13;
1638 case 0x22:
1639 return COUNTRY_CODE_IC;
1640 + case 0x25:
1641 + return COUNTRY_CODE_ETSI;
1642 case 0x32:
1643 return COUNTRY_CODE_TELEC_NETGEAR;
1644 case 0x41:
1645 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
1646 index 42774bc39786..254192b5dad1 100644
1647 --- a/drivers/pci/quirks.c
1648 +++ b/drivers/pci/quirks.c
1649 @@ -3136,6 +3136,7 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
1650 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
1651 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
1652 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
1653 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
1654
1655 static void quirk_no_pm_reset(struct pci_dev *dev)
1656 {
1657 diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
1658 index fb991ec76423..696116ebdf50 100644
1659 --- a/drivers/regulator/tps65910-regulator.c
1660 +++ b/drivers/regulator/tps65910-regulator.c
1661 @@ -1111,6 +1111,12 @@ static int tps65910_probe(struct platform_device *pdev)
1662 pmic->num_regulators = ARRAY_SIZE(tps65910_regs);
1663 pmic->ext_sleep_control = tps65910_ext_sleep_control;
1664 info = tps65910_regs;
1665 + /* Work around silicon erratum SWCZ010: output programmed
1666 + * voltage level can go higher than expected or crash
1667 + * Workaround: use no synchronization of DCDC clocks
1668 + */
1669 + tps65910_reg_clear_bits(pmic->mfd, TPS65910_DCDCCTRL,
1670 + DCDCCTRL_DCDCCKSYNC_MASK);
1671 break;
1672 case TPS65911:
1673 pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
1674 diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
1675 index 5d7fbe4e907e..581001989937 100644
1676 --- a/drivers/s390/scsi/zfcp_dbf.c
1677 +++ b/drivers/s390/scsi/zfcp_dbf.c
1678 @@ -3,7 +3,7 @@
1679 *
1680 * Debug traces for zfcp.
1681 *
1682 - * Copyright IBM Corp. 2002, 2013
1683 + * Copyright IBM Corp. 2002, 2016
1684 */
1685
1686 #define KMSG_COMPONENT "zfcp"
1687 @@ -65,7 +65,7 @@ void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
1688 * @tag: tag indicating which kind of unsolicited status has been received
1689 * @req: request for which a response was received
1690 */
1691 -void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
1692 +void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
1693 {
1694 struct zfcp_dbf *dbf = req->adapter->dbf;
1695 struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
1696 @@ -85,6 +85,8 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
1697 rec->u.res.req_issued = req->issued;
1698 rec->u.res.prot_status = q_pref->prot_status;
1699 rec->u.res.fsf_status = q_head->fsf_status;
1700 + rec->u.res.port_handle = q_head->port_handle;
1701 + rec->u.res.lun_handle = q_head->lun_handle;
1702
1703 memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
1704 FSF_PROT_STATUS_QUAL_SIZE);
1705 @@ -97,7 +99,7 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
1706 rec->pl_len, "fsf_res", req->req_id);
1707 }
1708
1709 - debug_event(dbf->hba, 1, rec, sizeof(*rec));
1710 + debug_event(dbf->hba, level, rec, sizeof(*rec));
1711 spin_unlock_irqrestore(&dbf->hba_lock, flags);
1712 }
1713
1714 @@ -241,7 +243,8 @@ static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
1715 if (sdev) {
1716 rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
1717 rec->lun = zfcp_scsi_dev_lun(sdev);
1718 - }
1719 + } else
1720 + rec->lun = ZFCP_DBF_INVALID_LUN;
1721 }
1722
1723 /**
1724 @@ -320,13 +323,48 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
1725 spin_unlock_irqrestore(&dbf->rec_lock, flags);
1726 }
1727
1728 +/**
1729 + * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
1730 + * @tag: identifier for event
1731 + * @wka_port: well known address port
1732 + * @req_id: request ID to correlate with potential HBA trace record
1733 + */
1734 +void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
1735 + u64 req_id)
1736 +{
1737 + struct zfcp_dbf *dbf = wka_port->adapter->dbf;
1738 + struct zfcp_dbf_rec *rec = &dbf->rec_buf;
1739 + unsigned long flags;
1740 +
1741 + spin_lock_irqsave(&dbf->rec_lock, flags);
1742 + memset(rec, 0, sizeof(*rec));
1743 +
1744 + rec->id = ZFCP_DBF_REC_RUN;
1745 + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
1746 + rec->port_status = wka_port->status;
1747 + rec->d_id = wka_port->d_id;
1748 + rec->lun = ZFCP_DBF_INVALID_LUN;
1749 +
1750 + rec->u.run.fsf_req_id = req_id;
1751 + rec->u.run.rec_status = ~0;
1752 + rec->u.run.rec_step = ~0;
1753 + rec->u.run.rec_action = ~0;
1754 + rec->u.run.rec_count = ~0;
1755 +
1756 + debug_event(dbf->rec, 1, rec, sizeof(*rec));
1757 + spin_unlock_irqrestore(&dbf->rec_lock, flags);
1758 +}
1759 +
1760 static inline
1761 -void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
1762 - u64 req_id, u32 d_id)
1763 +void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
1764 + char *paytag, struct scatterlist *sg, u8 id, u16 len,
1765 + u64 req_id, u32 d_id, u16 cap_len)
1766 {
1767 struct zfcp_dbf_san *rec = &dbf->san_buf;
1768 u16 rec_len;
1769 unsigned long flags;
1770 + struct zfcp_dbf_pay *payload = &dbf->pay_buf;
1771 + u16 pay_sum = 0;
1772
1773 spin_lock_irqsave(&dbf->san_lock, flags);
1774 memset(rec, 0, sizeof(*rec));
1775 @@ -334,10 +372,41 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
1776 rec->id = id;
1777 rec->fsf_req_id = req_id;
1778 rec->d_id = d_id;
1779 - rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD);
1780 - memcpy(rec->payload, data, rec_len);
1781 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
1782 + rec->pl_len = len; /* full length even if we cap pay below */
1783 + if (!sg)
1784 + goto out;
1785 + rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
1786 + memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
1787 + if (len <= rec_len)
1788 + goto out; /* skip pay record if full content in rec->payload */
1789 +
1790 + /* if (len > rec_len):
1791 + * dump data up to cap_len ignoring small duplicate in rec->payload
1792 + */
1793 + spin_lock(&dbf->pay_lock);
1794 + memset(payload, 0, sizeof(*payload));
1795 + memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
1796 + payload->fsf_req_id = req_id;
1797 + payload->counter = 0;
1798 + for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
1799 + u16 pay_len, offset = 0;
1800 +
1801 + while (offset < sg->length && pay_sum < cap_len) {
1802 + pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
1803 + (u16)(sg->length - offset));
1804 + /* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
1805 + memcpy(payload->data, sg_virt(sg) + offset, pay_len);
1806 + debug_event(dbf->pay, 1, payload,
1807 + zfcp_dbf_plen(pay_len));
1808 + payload->counter++;
1809 + offset += pay_len;
1810 + pay_sum += pay_len;
1811 + }
1812 + }
1813 + spin_unlock(&dbf->pay_lock);
1814
1815 +out:
1816 debug_event(dbf->san, 1, rec, sizeof(*rec));
1817 spin_unlock_irqrestore(&dbf->san_lock, flags);
1818 }
1819 @@ -354,9 +423,62 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
1820 struct zfcp_fsf_ct_els *ct_els = fsf->data;
1821 u16 length;
1822
1823 - length = (u16)(ct_els->req->length + FC_CT_HDR_LEN);
1824 - zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length,
1825 - fsf->req_id, d_id);
1826 + length = (u16)zfcp_qdio_real_bytes(ct_els->req);
1827 + zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
1828 + length, fsf->req_id, d_id, length);
1829 +}
1830 +
1831 +static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
1832 + struct zfcp_fsf_req *fsf,
1833 + u16 len)
1834 +{
1835 + struct zfcp_fsf_ct_els *ct_els = fsf->data;
1836 + struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
1837 + struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
1838 + struct scatterlist *resp_entry = ct_els->resp;
1839 + struct fc_gpn_ft_resp *acc;
1840 + int max_entries, x, last = 0;
1841 +
1842 + if (!(memcmp(tag, "fsscth2", 7) == 0
1843 + && ct_els->d_id == FC_FID_DIR_SERV
1844 + && reqh->ct_rev == FC_CT_REV
1845 + && reqh->ct_in_id[0] == 0
1846 + && reqh->ct_in_id[1] == 0
1847 + && reqh->ct_in_id[2] == 0
1848 + && reqh->ct_fs_type == FC_FST_DIR
1849 + && reqh->ct_fs_subtype == FC_NS_SUBTYPE
1850 + && reqh->ct_options == 0
1851 + && reqh->_ct_resvd1 == 0
1852 + && reqh->ct_cmd == FC_NS_GPN_FT
1853 + /* reqh->ct_mr_size can vary so do not match but read below */
1854 + && reqh->_ct_resvd2 == 0
1855 + && reqh->ct_reason == 0
1856 + && reqh->ct_explan == 0
1857 + && reqh->ct_vendor == 0
1858 + && reqn->fn_resvd == 0
1859 + && reqn->fn_domain_id_scope == 0
1860 + && reqn->fn_area_id_scope == 0
1861 + && reqn->fn_fc4_type == FC_TYPE_FCP))
1862 + return len; /* not GPN_FT response so do not cap */
1863 +
1864 + acc = sg_virt(resp_entry);
1865 + max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
1866 + + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
1867 + * to account for header as 1st pseudo "entry" */;
1868 +
1869 + /* the basic CT_IU preamble is the same size as one entry in the GPN_FT
1870 + * response, allowing us to skip special handling for it - just skip it
1871 + */
1872 + for (x = 1; x < max_entries && !last; x++) {
1873 + if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
1874 + acc++;
1875 + else
1876 + acc = sg_virt(++resp_entry);
1877 +
1878 + last = acc->fp_flags & FC_NS_FID_LAST;
1879 + }
1880 + len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
1881 + return len; /* cap after last entry */
1882 }
1883
1884 /**
1885 @@ -370,9 +492,10 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
1886 struct zfcp_fsf_ct_els *ct_els = fsf->data;
1887 u16 length;
1888
1889 - length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN);
1890 - zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length,
1891 - fsf->req_id, 0);
1892 + length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
1893 + zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
1894 + length, fsf->req_id, ct_els->d_id,
1895 + zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
1896 }
1897
1898 /**
1899 @@ -386,11 +509,13 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
1900 struct fsf_status_read_buffer *srb =
1901 (struct fsf_status_read_buffer *) fsf->data;
1902 u16 length;
1903 + struct scatterlist sg;
1904
1905 length = (u16)(srb->length -
1906 offsetof(struct fsf_status_read_buffer, payload));
1907 - zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length,
1908 - fsf->req_id, ntoh24(srb->d_id));
1909 + sg_init_one(&sg, srb->payload.data, length);
1910 + zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
1911 + fsf->req_id, ntoh24(srb->d_id), length);
1912 }
1913
1914 /**
1915 @@ -399,7 +524,8 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
1916 * @sc: pointer to struct scsi_cmnd
1917 * @fsf: pointer to struct zfcp_fsf_req
1918 */
1919 -void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
1920 +void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
1921 + struct zfcp_fsf_req *fsf)
1922 {
1923 struct zfcp_adapter *adapter =
1924 (struct zfcp_adapter *) sc->device->host->hostdata[0];
1925 @@ -442,7 +568,7 @@ void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
1926 }
1927 }
1928
1929 - debug_event(dbf->scsi, 1, rec, sizeof(*rec));
1930 + debug_event(dbf->scsi, level, rec, sizeof(*rec));
1931 spin_unlock_irqrestore(&dbf->scsi_lock, flags);
1932 }
1933
1934 diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
1935 index 0be3d48681ae..36d07584271d 100644
1936 --- a/drivers/s390/scsi/zfcp_dbf.h
1937 +++ b/drivers/s390/scsi/zfcp_dbf.h
1938 @@ -2,7 +2,7 @@
1939 * zfcp device driver
1940 * debug feature declarations
1941 *
1942 - * Copyright IBM Corp. 2008, 2010
1943 + * Copyright IBM Corp. 2008, 2015
1944 */
1945
1946 #ifndef ZFCP_DBF_H
1947 @@ -17,6 +17,11 @@
1948
1949 #define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull
1950
1951 +enum zfcp_dbf_pseudo_erp_act_type {
1952 + ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD = 0xff,
1953 + ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL = 0xfe,
1954 +};
1955 +
1956 /**
1957 * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
1958 * @ready: number of ready recovery actions
1959 @@ -110,6 +115,7 @@ struct zfcp_dbf_san {
1960 u32 d_id;
1961 #define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
1962 char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
1963 + u16 pl_len;
1964 } __packed;
1965
1966 /**
1967 @@ -126,6 +132,8 @@ struct zfcp_dbf_hba_res {
1968 u8 prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
1969 u32 fsf_status;
1970 u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
1971 + u32 port_handle;
1972 + u32 lun_handle;
1973 } __packed;
1974
1975 /**
1976 @@ -279,7 +287,7 @@ static inline
1977 void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
1978 {
1979 if (debug_level_enabled(req->adapter->dbf->hba, level))
1980 - zfcp_dbf_hba_fsf_res(tag, req);
1981 + zfcp_dbf_hba_fsf_res(tag, level, req);
1982 }
1983
1984 /**
1985 @@ -318,7 +326,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
1986 scmd->device->host->hostdata[0];
1987
1988 if (debug_level_enabled(adapter->dbf->scsi, level))
1989 - zfcp_dbf_scsi(tag, scmd, req);
1990 + zfcp_dbf_scsi(tag, level, scmd, req);
1991 }
1992
1993 /**
1994 diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
1995 index 3fb410977014..a59d678125bd 100644
1996 --- a/drivers/s390/scsi/zfcp_erp.c
1997 +++ b/drivers/s390/scsi/zfcp_erp.c
1998 @@ -3,7 +3,7 @@
1999 *
2000 * Error Recovery Procedures (ERP).
2001 *
2002 - * Copyright IBM Corp. 2002, 2010
2003 + * Copyright IBM Corp. 2002, 2015
2004 */
2005
2006 #define KMSG_COMPONENT "zfcp"
2007 @@ -1217,8 +1217,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
2008 break;
2009
2010 case ZFCP_ERP_ACTION_REOPEN_PORT:
2011 - if (result == ZFCP_ERP_SUCCEEDED)
2012 - zfcp_scsi_schedule_rport_register(port);
2013 + /* This switch case might also happen after a forced reopen
2014 + * was successfully done and thus overwritten with a new
2015 + * non-forced reopen at `ersfs_2'. In this case, we must not
2016 + * do the clean-up of the non-forced version.
2017 + */
2018 + if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
2019 + if (result == ZFCP_ERP_SUCCEEDED)
2020 + zfcp_scsi_schedule_rport_register(port);
2021 /* fall through */
2022 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
2023 put_device(&port->dev);
2024 diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
2025 index 5b500652572b..c8fed9fa1cca 100644
2026 --- a/drivers/s390/scsi/zfcp_ext.h
2027 +++ b/drivers/s390/scsi/zfcp_ext.h
2028 @@ -3,7 +3,7 @@
2029 *
2030 * External function declarations.
2031 *
2032 - * Copyright IBM Corp. 2002, 2010
2033 + * Copyright IBM Corp. 2002, 2015
2034 */
2035
2036 #ifndef ZFCP_EXT_H
2037 @@ -35,8 +35,9 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
2038 extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
2039 struct zfcp_port *, struct scsi_device *, u8, u8);
2040 extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
2041 +extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
2042 extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
2043 -extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
2044 +extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
2045 extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
2046 extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
2047 extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
2048 @@ -44,7 +45,8 @@ extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
2049 extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
2050 extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
2051 extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
2052 -extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *);
2053 +extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
2054 + struct zfcp_fsf_req *);
2055
2056 /* zfcp_erp.c */
2057 extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
2058 diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
2059 index 522a633c866a..75f820ca17b7 100644
2060 --- a/drivers/s390/scsi/zfcp_fsf.c
2061 +++ b/drivers/s390/scsi/zfcp_fsf.c
2062 @@ -3,7 +3,7 @@
2063 *
2064 * Implementation of FSF commands.
2065 *
2066 - * Copyright IBM Corp. 2002, 2013
2067 + * Copyright IBM Corp. 2002, 2015
2068 */
2069
2070 #define KMSG_COMPONENT "zfcp"
2071 @@ -508,7 +508,10 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
2072 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
2073 break;
2074 case FSF_TOPO_FABRIC:
2075 - fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
2076 + if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
2077 + fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
2078 + else
2079 + fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
2080 break;
2081 case FSF_TOPO_AL:
2082 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
2083 @@ -613,7 +616,6 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
2084
2085 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
2086 fc_host_permanent_port_name(shost) = bottom->wwpn;
2087 - fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
2088 } else
2089 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
2090 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
2091 @@ -982,8 +984,12 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
2092 if (zfcp_adapter_multi_buffer_active(adapter)) {
2093 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
2094 return -EIO;
2095 + qtcb->bottom.support.req_buf_length =
2096 + zfcp_qdio_real_bytes(sg_req);
2097 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
2098 return -EIO;
2099 + qtcb->bottom.support.resp_buf_length =
2100 + zfcp_qdio_real_bytes(sg_resp);
2101
2102 zfcp_qdio_set_data_div(qdio, &req->qdio_req,
2103 zfcp_qdio_sbale_count(sg_req));
2104 @@ -1073,6 +1079,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
2105
2106 req->handler = zfcp_fsf_send_ct_handler;
2107 req->qtcb->header.port_handle = wka_port->handle;
2108 + ct->d_id = wka_port->d_id;
2109 req->data = ct;
2110
2111 zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
2112 @@ -1169,6 +1176,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
2113
2114 hton24(req->qtcb->bottom.support.d_id, d_id);
2115 req->handler = zfcp_fsf_send_els_handler;
2116 + els->d_id = d_id;
2117 req->data = els;
2118
2119 zfcp_dbf_san_req("fssels1", req, d_id);
2120 @@ -1575,7 +1583,7 @@ out:
2121 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
2122 {
2123 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
2124 - struct zfcp_fsf_req *req;
2125 + struct zfcp_fsf_req *req = NULL;
2126 int retval = -EIO;
2127
2128 spin_lock_irq(&qdio->req_q_lock);
2129 @@ -1604,6 +1612,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
2130 zfcp_fsf_req_free(req);
2131 out:
2132 spin_unlock_irq(&qdio->req_q_lock);
2133 + if (req && !IS_ERR(req))
2134 + zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
2135 return retval;
2136 }
2137
2138 @@ -1628,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
2139 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
2140 {
2141 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
2142 - struct zfcp_fsf_req *req;
2143 + struct zfcp_fsf_req *req = NULL;
2144 int retval = -EIO;
2145
2146 spin_lock_irq(&qdio->req_q_lock);
2147 @@ -1657,6 +1667,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
2148 zfcp_fsf_req_free(req);
2149 out:
2150 spin_unlock_irq(&qdio->req_q_lock);
2151 + if (req && !IS_ERR(req))
2152 + zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
2153 return retval;
2154 }
2155
2156 diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
2157 index 57ae3ae1046d..be1c04b334c5 100644
2158 --- a/drivers/s390/scsi/zfcp_fsf.h
2159 +++ b/drivers/s390/scsi/zfcp_fsf.h
2160 @@ -3,7 +3,7 @@
2161 *
2162 * Interface to the FSF support functions.
2163 *
2164 - * Copyright IBM Corp. 2002, 2010
2165 + * Copyright IBM Corp. 2002, 2015
2166 */
2167
2168 #ifndef FSF_H
2169 @@ -436,6 +436,7 @@ struct zfcp_blk_drv_data {
2170 * @handler_data: data passed to handler function
2171 * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
2172 * @status: used to pass error status to calling function
2173 + * @d_id: Destination ID of either open WKA port for CT or of D_ID for ELS
2174 */
2175 struct zfcp_fsf_ct_els {
2176 struct scatterlist *req;
2177 @@ -444,6 +445,7 @@ struct zfcp_fsf_ct_els {
2178 void *handler_data;
2179 struct zfcp_port *port;
2180 int status;
2181 + u32 d_id;
2182 };
2183
2184 #endif /* FSF_H */
2185 diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
2186 index b3c6ff49103b..9069f98a1817 100644
2187 --- a/drivers/s390/scsi/zfcp_scsi.c
2188 +++ b/drivers/s390/scsi/zfcp_scsi.c
2189 @@ -3,7 +3,7 @@
2190 *
2191 * Interface to Linux SCSI midlayer.
2192 *
2193 - * Copyright IBM Corp. 2002, 2013
2194 + * Copyright IBM Corp. 2002, 2015
2195 */
2196
2197 #define KMSG_COMPONENT "zfcp"
2198 @@ -556,6 +556,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
2199 ids.port_id = port->d_id;
2200 ids.roles = FC_RPORT_ROLE_FCP_TARGET;
2201
2202 + zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
2203 + ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
2204 + ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
2205 rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
2206 if (!rport) {
2207 dev_err(&port->adapter->ccw_device->dev,
2208 @@ -577,6 +580,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
2209 struct fc_rport *rport = port->rport;
2210
2211 if (rport) {
2212 + zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
2213 + ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
2214 + ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
2215 fc_remote_port_delete(rport);
2216 port->rport = NULL;
2217 }
2218 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
2219 index a3860367b568..e9ce74afd13f 100644
2220 --- a/drivers/scsi/hpsa.c
2221 +++ b/drivers/scsi/hpsa.c
2222 @@ -3930,6 +3930,70 @@ static int hpsa_set_local_logical_count(struct ctlr_info *h,
2223 return rc;
2224 }
2225
2226 +static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
2227 +{
2228 + struct bmic_identify_physical_device *id_phys;
2229 + bool is_spare = false;
2230 + int rc;
2231 +
2232 + id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
2233 + if (!id_phys)
2234 + return false;
2235 +
2236 + rc = hpsa_bmic_id_physical_device(h,
2237 + lunaddrbytes,
2238 + GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
2239 + id_phys, sizeof(*id_phys));
2240 + if (rc == 0)
2241 + is_spare = (id_phys->more_flags >> 6) & 0x01;
2242 +
2243 + kfree(id_phys);
2244 + return is_spare;
2245 +}
2246 +
2247 +#define RPL_DEV_FLAG_NON_DISK 0x1
2248 +#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2
2249 +#define RPL_DEV_FLAG_UNCONFIG_DISK 0x4
2250 +
2251 +#define BMIC_DEVICE_TYPE_ENCLOSURE 6
2252 +
2253 +static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
2254 + struct ext_report_lun_entry *rle)
2255 +{
2256 + u8 device_flags;
2257 + u8 device_type;
2258 +
2259 + if (!MASKED_DEVICE(lunaddrbytes))
2260 + return false;
2261 +
2262 + device_flags = rle->device_flags;
2263 + device_type = rle->device_type;
2264 +
2265 + if (device_flags & RPL_DEV_FLAG_NON_DISK) {
2266 + if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
2267 + return false;
2268 + return true;
2269 + }
2270 +
2271 + if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
2272 + return false;
2273 +
2274 + if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
2275 + return false;
2276 +
2277 + /*
2278 + * Spares may be spun down, we do not want to
2279 + * do an Inquiry to a RAID set spare drive as
2280 + * that would have them spun up, that is a
2281 + * performance hit because I/O to the RAID device
2282 + * stops while the spin up occurs which can take
2283 + * over 50 seconds.
2284 + */
2285 + if (hpsa_is_disk_spare(h, lunaddrbytes))
2286 + return true;
2287 +
2288 + return false;
2289 +}
2290
2291 static void hpsa_update_scsi_devices(struct ctlr_info *h)
2292 {
2293 @@ -4023,6 +4087,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
2294 u8 *lunaddrbytes, is_OBDR = 0;
2295 int rc = 0;
2296 int phys_dev_index = i - (raid_ctlr_position == 0);
2297 + bool skip_device = false;
2298
2299 physical_device = i < nphysicals + (raid_ctlr_position == 0);
2300
2301 @@ -4030,10 +4095,15 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
2302 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
2303 i, nphysicals, nlogicals, physdev_list, logdev_list);
2304
2305 - /* skip masked non-disk devices */
2306 - if (MASKED_DEVICE(lunaddrbytes) && physical_device &&
2307 - (physdev_list->LUN[phys_dev_index].device_flags & 0x01))
2308 - continue;
2309 + /*
2310 + * Skip over some devices such as a spare.
2311 + */
2312 + if (!tmpdevice->external && physical_device) {
2313 + skip_device = hpsa_skip_device(h, lunaddrbytes,
2314 + &physdev_list->LUN[phys_dev_index]);
2315 + if (skip_device)
2316 + continue;
2317 + }
2318
2319 /* Get device type, vendor, model, device id */
2320 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
2321 diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
2322 index f0cfaacbfabd..692445bcca6f 100644
2323 --- a/drivers/scsi/scsi_scan.c
2324 +++ b/drivers/scsi/scsi_scan.c
2325 @@ -1459,12 +1459,12 @@ retry:
2326 out_err:
2327 kfree(lun_data);
2328 out:
2329 - scsi_device_put(sdev);
2330 if (scsi_device_created(sdev))
2331 /*
2332 * the sdev we used didn't appear in the report luns scan
2333 */
2334 __scsi_remove_device(sdev);
2335 + scsi_device_put(sdev);
2336 return ret;
2337 }
2338
2339 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2340 index 0d7c6e86f149..6ee50742f6a5 100644
2341 --- a/drivers/scsi/sd.c
2342 +++ b/drivers/scsi/sd.c
2343 @@ -2879,10 +2879,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
2344 if (sdkp->opt_xfer_blocks &&
2345 sdkp->opt_xfer_blocks <= dev_max &&
2346 sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
2347 - sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
2348 - rw_max = q->limits.io_opt =
2349 - sdkp->opt_xfer_blocks * sdp->sector_size;
2350 - else
2351 + logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_CACHE_SIZE) {
2352 + q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
2353 + rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
2354 + } else
2355 rw_max = BLK_DEF_MAX_SECTORS;
2356
2357 /* Combine with controller limits */
2358 diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
2359 index 654630bb7d0e..765a6f1ac1b7 100644
2360 --- a/drivers/scsi/sd.h
2361 +++ b/drivers/scsi/sd.h
2362 @@ -151,6 +151,11 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo
2363 return blocks << (ilog2(sdev->sector_size) - 9);
2364 }
2365
2366 +static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
2367 +{
2368 + return blocks * sdev->sector_size;
2369 +}
2370 +
2371 /*
2372 * A DIF-capable target device can be formatted with different
2373 * protection schemes. Currently 0 through 3 are defined:
2374 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
2375 index 7bc3778a1ac9..2a67af4e2e13 100644
2376 --- a/drivers/target/target_core_transport.c
2377 +++ b/drivers/target/target_core_transport.c
2378 @@ -1680,6 +1680,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
2379 case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
2380 case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
2381 case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
2382 + case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
2383 break;
2384 case TCM_OUT_OF_RESOURCES:
2385 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2386 @@ -2509,8 +2510,10 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2387 * fabric acknowledgement that requires two target_put_sess_cmd()
2388 * invocations before se_cmd descriptor release.
2389 */
2390 - if (ack_kref)
2391 + if (ack_kref) {
2392 kref_get(&se_cmd->cmd_kref);
2393 + se_cmd->se_cmd_flags |= SCF_ACK_KREF;
2394 + }
2395
2396 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2397 if (se_sess->sess_tearing_down) {
2398 @@ -2833,6 +2836,12 @@ static const struct sense_info sense_info_table[] = {
2399 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
2400 .add_sector_info = true,
2401 },
2402 + [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
2403 + .key = COPY_ABORTED,
2404 + .asc = 0x0d,
2405 + .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
2406 +
2407 + },
2408 [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
2409 /*
2410 * Returning ILLEGAL REQUEST would cause immediate IO errors on
2411 diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
2412 index 47fe94ee10b8..153a6f255b6d 100644
2413 --- a/drivers/target/target_core_xcopy.c
2414 +++ b/drivers/target/target_core_xcopy.c
2415 @@ -104,7 +104,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
2416 }
2417 mutex_unlock(&g_device_mutex);
2418
2419 - pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
2420 + pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
2421 return -EINVAL;
2422 }
2423
2424 @@ -185,7 +185,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
2425
2426 static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
2427 struct xcopy_op *xop, unsigned char *p,
2428 - unsigned short tdll)
2429 + unsigned short tdll, sense_reason_t *sense_ret)
2430 {
2431 struct se_device *local_dev = se_cmd->se_dev;
2432 unsigned char *desc = p;
2433 @@ -193,6 +193,8 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
2434 unsigned short start = 0;
2435 bool src = true;
2436
2437 + *sense_ret = TCM_INVALID_PARAMETER_LIST;
2438 +
2439 if (offset != 0) {
2440 pr_err("XCOPY target descriptor list length is not"
2441 " multiple of %d\n", XCOPY_TARGET_DESC_LEN);
2442 @@ -243,9 +245,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
2443 rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
2444 else
2445 rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
2446 -
2447 - if (rc < 0)
2448 + /*
2449 + * If a matching IEEE NAA 0x83 descriptor for the requested device
2450 + * is not located on this node, return COPY_ABORTED with ASQ/ASQC
2451 + * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the
2452 + * initiator to fall back to normal copy method.
2453 + */
2454 + if (rc < 0) {
2455 + *sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
2456 goto out;
2457 + }
2458
2459 pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
2460 xop->src_dev, &xop->src_tid_wwn[0]);
2461 @@ -653,6 +662,7 @@ static int target_xcopy_read_source(
2462 rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
2463 remote_port, true);
2464 if (rc < 0) {
2465 + ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
2466 transport_generic_free_cmd(se_cmd, 0);
2467 return rc;
2468 }
2469 @@ -664,6 +674,7 @@ static int target_xcopy_read_source(
2470
2471 rc = target_xcopy_issue_pt_cmd(xpt_cmd);
2472 if (rc < 0) {
2473 + ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
2474 transport_generic_free_cmd(se_cmd, 0);
2475 return rc;
2476 }
2477 @@ -714,6 +725,7 @@ static int target_xcopy_write_destination(
2478 remote_port, false);
2479 if (rc < 0) {
2480 struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
2481 + ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
2482 /*
2483 * If the failure happened before the t_mem_list hand-off in
2484 * target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that
2485 @@ -729,6 +741,7 @@ static int target_xcopy_write_destination(
2486
2487 rc = target_xcopy_issue_pt_cmd(xpt_cmd);
2488 if (rc < 0) {
2489 + ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
2490 se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
2491 transport_generic_free_cmd(se_cmd, 0);
2492 return rc;
2493 @@ -815,9 +828,14 @@ static void target_xcopy_do_work(struct work_struct *work)
2494 out:
2495 xcopy_pt_undepend_remotedev(xop);
2496 kfree(xop);
2497 -
2498 - pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
2499 - ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
2500 + /*
2501 + * Don't override an error scsi status if it has already been set
2502 + */
2503 + if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
2504 + pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
2505 + " CHECK_CONDITION -> sending response\n", rc);
2506 + ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
2507 + }
2508 target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
2509 }
2510
2511 @@ -875,7 +893,7 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
2512 " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
2513 tdll, sdll, inline_dl);
2514
2515 - rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
2516 + rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
2517 if (rc <= 0)
2518 goto out;
2519
2520 diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
2521 index 95d293b7445a..dc2fcda54d53 100644
2522 --- a/drivers/video/fbdev/efifb.c
2523 +++ b/drivers/video/fbdev/efifb.c
2524 @@ -52,9 +52,9 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
2525 return 1;
2526
2527 if (regno < 16) {
2528 - red >>= 8;
2529 - green >>= 8;
2530 - blue >>= 8;
2531 + red >>= 16 - info->var.red.length;
2532 + green >>= 16 - info->var.green.length;
2533 + blue >>= 16 - info->var.blue.length;
2534 ((u32 *)(info->pseudo_palette))[regno] =
2535 (red << info->var.red.offset) |
2536 (green << info->var.green.offset) |
2537 diff --git a/fs/ceph/file.c b/fs/ceph/file.c
2538 index 3c68e6aee2f0..c8222bfe1e56 100644
2539 --- a/fs/ceph/file.c
2540 +++ b/fs/ceph/file.c
2541 @@ -929,7 +929,8 @@ again:
2542 statret = __ceph_do_getattr(inode, page,
2543 CEPH_STAT_CAP_INLINE_DATA, !!page);
2544 if (statret < 0) {
2545 - __free_page(page);
2546 + if (page)
2547 + __free_page(page);
2548 if (statret == -ENODATA) {
2549 BUG_ON(retry_op != READ_INLINE);
2550 goto again;
2551 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
2552 index 50b268483302..0a3544fb50f9 100644
2553 --- a/fs/cifs/cifs_debug.c
2554 +++ b/fs/cifs/cifs_debug.c
2555 @@ -152,6 +152,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
2556 list_for_each(tmp1, &cifs_tcp_ses_list) {
2557 server = list_entry(tmp1, struct TCP_Server_Info,
2558 tcp_ses_list);
2559 + seq_printf(m, "\nNumber of credits: %d", server->credits);
2560 i++;
2561 list_for_each(tmp2, &server->smb_ses_list) {
2562 ses = list_entry(tmp2, struct cifs_ses,
2563 @@ -255,7 +256,6 @@ static const struct file_operations cifs_debug_data_proc_fops = {
2564 static ssize_t cifs_stats_proc_write(struct file *file,
2565 const char __user *buffer, size_t count, loff_t *ppos)
2566 {
2567 - char c;
2568 bool bv;
2569 int rc;
2570 struct list_head *tmp1, *tmp2, *tmp3;
2571 @@ -263,11 +263,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
2572 struct cifs_ses *ses;
2573 struct cifs_tcon *tcon;
2574
2575 - rc = get_user(c, buffer);
2576 - if (rc)
2577 - return rc;
2578 -
2579 - if (strtobool(&c, &bv) == 0) {
2580 + rc = kstrtobool_from_user(buffer, count, &bv);
2581 + if (rc == 0) {
2582 #ifdef CONFIG_CIFS_STATS2
2583 atomic_set(&totBufAllocCount, 0);
2584 atomic_set(&totSmBufAllocCount, 0);
2585 @@ -290,6 +287,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
2586 }
2587 }
2588 spin_unlock(&cifs_tcp_ses_lock);
2589 + } else {
2590 + return rc;
2591 }
2592
2593 return count;
2594 @@ -433,17 +432,17 @@ static int cifsFYI_proc_open(struct inode *inode, struct file *file)
2595 static ssize_t cifsFYI_proc_write(struct file *file, const char __user *buffer,
2596 size_t count, loff_t *ppos)
2597 {
2598 - char c;
2599 + char c[2] = { '\0' };
2600 bool bv;
2601 int rc;
2602
2603 - rc = get_user(c, buffer);
2604 + rc = get_user(c[0], buffer);
2605 if (rc)
2606 return rc;
2607 - if (strtobool(&c, &bv) == 0)
2608 + if (strtobool(c, &bv) == 0)
2609 cifsFYI = bv;
2610 - else if ((c > '1') && (c <= '9'))
2611 - cifsFYI = (int) (c - '0'); /* see cifs_debug.h for meanings */
2612 + else if ((c[0] > '1') && (c[0] <= '9'))
2613 + cifsFYI = (int) (c[0] - '0'); /* see cifs_debug.h for meanings */
2614
2615 return count;
2616 }
2617 @@ -471,20 +470,12 @@ static int cifs_linux_ext_proc_open(struct inode *inode, struct file *file)
2618 static ssize_t cifs_linux_ext_proc_write(struct file *file,
2619 const char __user *buffer, size_t count, loff_t *ppos)
2620 {
2621 - char c;
2622 - bool bv;
2623 int rc;
2624
2625 - rc = get_user(c, buffer);
2626 + rc = kstrtobool_from_user(buffer, count, &linuxExtEnabled);
2627 if (rc)
2628 return rc;
2629
2630 - rc = strtobool(&c, &bv);
2631 - if (rc)
2632 - return rc;
2633 -
2634 - linuxExtEnabled = bv;
2635 -
2636 return count;
2637 }
2638
2639 @@ -511,20 +502,12 @@ static int cifs_lookup_cache_proc_open(struct inode *inode, struct file *file)
2640 static ssize_t cifs_lookup_cache_proc_write(struct file *file,
2641 const char __user *buffer, size_t count, loff_t *ppos)
2642 {
2643 - char c;
2644 - bool bv;
2645 int rc;
2646
2647 - rc = get_user(c, buffer);
2648 + rc = kstrtobool_from_user(buffer, count, &lookupCacheEnabled);
2649 if (rc)
2650 return rc;
2651
2652 - rc = strtobool(&c, &bv);
2653 - if (rc)
2654 - return rc;
2655 -
2656 - lookupCacheEnabled = bv;
2657 -
2658 return count;
2659 }
2660
2661 @@ -551,20 +534,12 @@ static int traceSMB_proc_open(struct inode *inode, struct file *file)
2662 static ssize_t traceSMB_proc_write(struct file *file, const char __user *buffer,
2663 size_t count, loff_t *ppos)
2664 {
2665 - char c;
2666 - bool bv;
2667 int rc;
2668
2669 - rc = get_user(c, buffer);
2670 + rc = kstrtobool_from_user(buffer, count, &traceSMB);
2671 if (rc)
2672 return rc;
2673
2674 - rc = strtobool(&c, &bv);
2675 - if (rc)
2676 - return rc;
2677 -
2678 - traceSMB = bv;
2679 -
2680 return count;
2681 }
2682
2683 @@ -622,7 +597,6 @@ static ssize_t cifs_security_flags_proc_write(struct file *file,
2684 int rc;
2685 unsigned int flags;
2686 char flags_string[12];
2687 - char c;
2688 bool bv;
2689
2690 if ((count < 1) || (count > 11))
2691 @@ -635,11 +609,10 @@ static ssize_t cifs_security_flags_proc_write(struct file *file,
2692
2693 if (count < 3) {
2694 /* single char or single char followed by null */
2695 - c = flags_string[0];
2696 - if (strtobool(&c, &bv) == 0) {
2697 + if (strtobool(flags_string, &bv) == 0) {
2698 global_secflags = bv ? CIFSSEC_MAX : CIFSSEC_DEF;
2699 return count;
2700 - } else if (!isdigit(c)) {
2701 + } else if (!isdigit(flags_string[0])) {
2702 cifs_dbg(VFS, "Invalid SecurityFlags: %s\n",
2703 flags_string);
2704 return -EINVAL;
2705 diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
2706 index 66cf0f9fff89..c611ca2339d7 100644
2707 --- a/fs/cifs/cifs_debug.h
2708 +++ b/fs/cifs/cifs_debug.h
2709 @@ -25,7 +25,7 @@
2710 void cifs_dump_mem(char *label, void *data, int length);
2711 void cifs_dump_detail(void *);
2712 void cifs_dump_mids(struct TCP_Server_Info *);
2713 -extern int traceSMB; /* flag which enables the function below */
2714 +extern bool traceSMB; /* flag which enables the function below */
2715 void dump_smb(void *, int);
2716 #define CIFS_INFO 0x01
2717 #define CIFS_RC 0x02
2718 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
2719 index 450578097fb7..4f4fc9ff3636 100644
2720 --- a/fs/cifs/cifsfs.c
2721 +++ b/fs/cifs/cifsfs.c
2722 @@ -54,10 +54,10 @@
2723 #endif
2724
2725 int cifsFYI = 0;
2726 -int traceSMB = 0;
2727 +bool traceSMB;
2728 bool enable_oplocks = true;
2729 -unsigned int linuxExtEnabled = 1;
2730 -unsigned int lookupCacheEnabled = 1;
2731 +bool linuxExtEnabled = true;
2732 +bool lookupCacheEnabled = true;
2733 unsigned int global_secflags = CIFSSEC_DEF;
2734 /* unsigned int ntlmv2_support = 0; */
2735 unsigned int sign_CIFS_PDUs = 1;
2736 @@ -268,7 +268,7 @@ cifs_alloc_inode(struct super_block *sb)
2737 cifs_inode->createtime = 0;
2738 cifs_inode->epoch = 0;
2739 #ifdef CONFIG_CIFS_SMB2
2740 - get_random_bytes(cifs_inode->lease_key, SMB2_LEASE_KEY_SIZE);
2741 + generate_random_uuid(cifs_inode->lease_key);
2742 #endif
2743 /*
2744 * Can not set i_flags here - they get immediately overwritten to zero
2745 @@ -1210,7 +1210,6 @@ init_cifs(void)
2746 GlobalTotalActiveXid = 0;
2747 GlobalMaxActiveXid = 0;
2748 spin_lock_init(&cifs_tcp_ses_lock);
2749 - spin_lock_init(&cifs_file_list_lock);
2750 spin_lock_init(&GlobalMid_Lock);
2751
2752 if (cifs_max_pending < 2) {
2753 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
2754 index 2b510c537a0d..c669a1471395 100644
2755 --- a/fs/cifs/cifsglob.h
2756 +++ b/fs/cifs/cifsglob.h
2757 @@ -827,6 +827,7 @@ struct cifs_tcon {
2758 struct list_head tcon_list;
2759 int tc_count;
2760 struct list_head openFileList;
2761 + spinlock_t open_file_lock; /* protects list above */
2762 struct cifs_ses *ses; /* pointer to session associated with */
2763 char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
2764 char *nativeFileSystem;
2765 @@ -883,7 +884,7 @@ struct cifs_tcon {
2766 #endif /* CONFIG_CIFS_STATS2 */
2767 __u64 bytes_read;
2768 __u64 bytes_written;
2769 - spinlock_t stat_lock;
2770 + spinlock_t stat_lock; /* protects the two fields above */
2771 #endif /* CONFIG_CIFS_STATS */
2772 FILE_SYSTEM_DEVICE_INFO fsDevInfo;
2773 FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
2774 @@ -1034,8 +1035,10 @@ struct cifs_fid_locks {
2775 };
2776
2777 struct cifsFileInfo {
2778 + /* following two lists are protected by tcon->open_file_lock */
2779 struct list_head tlist; /* pointer to next fid owned by tcon */
2780 struct list_head flist; /* next fid (file instance) for this inode */
2781 + /* lock list below protected by cifsi->lock_sem */
2782 struct cifs_fid_locks *llist; /* brlocks held by this fid */
2783 kuid_t uid; /* allows finding which FileInfo structure */
2784 __u32 pid; /* process id who opened file */
2785 @@ -1043,11 +1046,12 @@ struct cifsFileInfo {
2786 /* BB add lock scope info here if needed */ ;
2787 /* lock scope id (0 if none) */
2788 struct dentry *dentry;
2789 - unsigned int f_flags;
2790 struct tcon_link *tlink;
2791 + unsigned int f_flags;
2792 bool invalidHandle:1; /* file closed via session abend */
2793 bool oplock_break_cancelled:1;
2794 - int count; /* refcount protected by cifs_file_list_lock */
2795 + int count;
2796 + spinlock_t file_info_lock; /* protects four flag/count fields above */
2797 struct mutex fh_mutex; /* prevents reopen race after dead ses*/
2798 struct cifs_search_info srch_inf;
2799 struct work_struct oplock_break; /* work for oplock breaks */
2800 @@ -1114,7 +1118,7 @@ struct cifs_writedata {
2801
2802 /*
2803 * Take a reference on the file private data. Must be called with
2804 - * cifs_file_list_lock held.
2805 + * cfile->file_info_lock held.
2806 */
2807 static inline void
2808 cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
2809 @@ -1508,8 +1512,10 @@ require use of the stronger protocol */
2810 * GlobalMid_Lock protects:
2811 * list operations on pending_mid_q and oplockQ
2812 * updates to XID counters, multiplex id and SMB sequence numbers
2813 - * cifs_file_list_lock protects:
2814 - * list operations on tcp and SMB session lists and tCon lists
2815 + * tcp_ses_lock protects:
2816 + * list operations on tcp and SMB session lists
2817 + * tcon->open_file_lock protects the list of open files hanging off the tcon
2818 + * cfile->file_info_lock protects counters and fields in cifs file struct
2819 * f_owner.lock protects certain per file struct operations
2820 * mapping->page_lock protects certain per page operations
2821 *
2822 @@ -1541,18 +1547,12 @@ GLOBAL_EXTERN struct list_head cifs_tcp_ses_list;
2823 * tcp session, and the list of tcon's per smb session. It also protects
2824 * the reference counters for the server, smb session, and tcon. Finally,
2825 * changes to the tcon->tidStatus should be done while holding this lock.
2826 + * generally the locks should be taken in order tcp_ses_lock before
2827 + * tcon->open_file_lock and that before file->file_info_lock since the
2828 + * structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file
2829 */
2830 GLOBAL_EXTERN spinlock_t cifs_tcp_ses_lock;
2831
2832 -/*
2833 - * This lock protects the cifs_file->llist and cifs_file->flist
2834 - * list operations, and updates to some flags (cifs_file->invalidHandle)
2835 - * It will be moved to either use the tcon->stat_lock or equivalent later.
2836 - * If cifs_tcp_ses_lock and the lock below are both needed to be held, then
2837 - * the cifs_tcp_ses_lock must be grabbed first and released last.
2838 - */
2839 -GLOBAL_EXTERN spinlock_t cifs_file_list_lock;
2840 -
2841 #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
2842 /* Outstanding dir notify requests */
2843 GLOBAL_EXTERN struct list_head GlobalDnotifyReqList;
2844 @@ -1588,11 +1588,11 @@ GLOBAL_EXTERN atomic_t midCount;
2845
2846 /* Misc globals */
2847 GLOBAL_EXTERN bool enable_oplocks; /* enable or disable oplocks */
2848 -GLOBAL_EXTERN unsigned int lookupCacheEnabled;
2849 +GLOBAL_EXTERN bool lookupCacheEnabled;
2850 GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent
2851 with more secure ntlmssp2 challenge/resp */
2852 GLOBAL_EXTERN unsigned int sign_CIFS_PDUs; /* enable smb packet signing */
2853 -GLOBAL_EXTERN unsigned int linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/
2854 +GLOBAL_EXTERN bool linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/
2855 GLOBAL_EXTERN unsigned int CIFSMaxBufSize; /* max size not including hdr */
2856 GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
2857 GLOBAL_EXTERN unsigned int cifs_min_small; /* min size of small buf pool */
2858 diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
2859 index 76fcb50295a3..b1104ed8f54c 100644
2860 --- a/fs/cifs/cifssmb.c
2861 +++ b/fs/cifs/cifssmb.c
2862 @@ -98,13 +98,13 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
2863 struct list_head *tmp1;
2864
2865 /* list all files open on tree connection and mark them invalid */
2866 - spin_lock(&cifs_file_list_lock);
2867 + spin_lock(&tcon->open_file_lock);
2868 list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
2869 open_file = list_entry(tmp, struct cifsFileInfo, tlist);
2870 open_file->invalidHandle = true;
2871 open_file->oplock_break_cancelled = true;
2872 }
2873 - spin_unlock(&cifs_file_list_lock);
2874 + spin_unlock(&tcon->open_file_lock);
2875 /*
2876 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
2877 * to this tcon.
2878 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
2879 index 61c3a5ab8637..812a8cb07c63 100644
2880 --- a/fs/cifs/connect.c
2881 +++ b/fs/cifs/connect.c
2882 @@ -2200,7 +2200,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
2883 memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
2884 sizeof(tcp_ses->dstaddr));
2885 #ifdef CONFIG_CIFS_SMB2
2886 - get_random_bytes(tcp_ses->client_guid, SMB2_CLIENT_GUID_SIZE);
2887 + generate_random_uuid(tcp_ses->client_guid);
2888 #endif
2889 /*
2890 * at this point we are the only ones with the pointer
2891 @@ -3693,14 +3693,16 @@ remote_path_check:
2892 goto mount_fail_check;
2893 }
2894
2895 - rc = cifs_are_all_path_components_accessible(server,
2896 + if (rc != -EREMOTE) {
2897 + rc = cifs_are_all_path_components_accessible(server,
2898 xid, tcon, cifs_sb,
2899 full_path);
2900 - if (rc != 0) {
2901 - cifs_dbg(VFS, "cannot query dirs between root and final path, "
2902 - "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
2903 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
2904 - rc = 0;
2905 + if (rc != 0) {
2906 + cifs_dbg(VFS, "cannot query dirs between root and final path, "
2907 + "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
2908 + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
2909 + rc = 0;
2910 + }
2911 }
2912 kfree(full_path);
2913 }
2914 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
2915 index 0068e82217c3..72f270d4bd17 100644
2916 --- a/fs/cifs/file.c
2917 +++ b/fs/cifs/file.c
2918 @@ -305,6 +305,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
2919 cfile->tlink = cifs_get_tlink(tlink);
2920 INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
2921 mutex_init(&cfile->fh_mutex);
2922 + spin_lock_init(&cfile->file_info_lock);
2923
2924 cifs_sb_active(inode->i_sb);
2925
2926 @@ -317,7 +318,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
2927 oplock = 0;
2928 }
2929
2930 - spin_lock(&cifs_file_list_lock);
2931 + spin_lock(&tcon->open_file_lock);
2932 if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
2933 oplock = fid->pending_open->oplock;
2934 list_del(&fid->pending_open->olist);
2935 @@ -326,12 +327,13 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
2936 server->ops->set_fid(cfile, fid, oplock);
2937
2938 list_add(&cfile->tlist, &tcon->openFileList);
2939 +
2940 /* if readable file instance put first in list*/
2941 if (file->f_mode & FMODE_READ)
2942 list_add(&cfile->flist, &cinode->openFileList);
2943 else
2944 list_add_tail(&cfile->flist, &cinode->openFileList);
2945 - spin_unlock(&cifs_file_list_lock);
2946 + spin_unlock(&tcon->open_file_lock);
2947
2948 if (fid->purge_cache)
2949 cifs_zap_mapping(inode);
2950 @@ -343,16 +345,16 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
2951 struct cifsFileInfo *
2952 cifsFileInfo_get(struct cifsFileInfo *cifs_file)
2953 {
2954 - spin_lock(&cifs_file_list_lock);
2955 + spin_lock(&cifs_file->file_info_lock);
2956 cifsFileInfo_get_locked(cifs_file);
2957 - spin_unlock(&cifs_file_list_lock);
2958 + spin_unlock(&cifs_file->file_info_lock);
2959 return cifs_file;
2960 }
2961
2962 /*
2963 * Release a reference on the file private data. This may involve closing
2964 * the filehandle out on the server. Must be called without holding
2965 - * cifs_file_list_lock.
2966 + * tcon->open_file_lock and cifs_file->file_info_lock.
2967 */
2968 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
2969 {
2970 @@ -367,11 +369,15 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
2971 struct cifs_pending_open open;
2972 bool oplock_break_cancelled;
2973
2974 - spin_lock(&cifs_file_list_lock);
2975 + spin_lock(&tcon->open_file_lock);
2976 +
2977 + spin_lock(&cifs_file->file_info_lock);
2978 if (--cifs_file->count > 0) {
2979 - spin_unlock(&cifs_file_list_lock);
2980 + spin_unlock(&cifs_file->file_info_lock);
2981 + spin_unlock(&tcon->open_file_lock);
2982 return;
2983 }
2984 + spin_unlock(&cifs_file->file_info_lock);
2985
2986 if (server->ops->get_lease_key)
2987 server->ops->get_lease_key(inode, &fid);
2988 @@ -395,7 +401,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
2989 set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
2990 cifs_set_oplock_level(cifsi, 0);
2991 }
2992 - spin_unlock(&cifs_file_list_lock);
2993 +
2994 + spin_unlock(&tcon->open_file_lock);
2995
2996 oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
2997
2998 @@ -772,10 +779,10 @@ int cifs_closedir(struct inode *inode, struct file *file)
2999 server = tcon->ses->server;
3000
3001 cifs_dbg(FYI, "Freeing private data in close dir\n");
3002 - spin_lock(&cifs_file_list_lock);
3003 + spin_lock(&cfile->file_info_lock);
3004 if (server->ops->dir_needs_close(cfile)) {
3005 cfile->invalidHandle = true;
3006 - spin_unlock(&cifs_file_list_lock);
3007 + spin_unlock(&cfile->file_info_lock);
3008 if (server->ops->close_dir)
3009 rc = server->ops->close_dir(xid, tcon, &cfile->fid);
3010 else
3011 @@ -784,7 +791,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
3012 /* not much we can do if it fails anyway, ignore rc */
3013 rc = 0;
3014 } else
3015 - spin_unlock(&cifs_file_list_lock);
3016 + spin_unlock(&cfile->file_info_lock);
3017
3018 buf = cfile->srch_inf.ntwrk_buf_start;
3019 if (buf) {
3020 @@ -1720,12 +1727,13 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
3021 {
3022 struct cifsFileInfo *open_file = NULL;
3023 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
3024 + struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
3025
3026 /* only filter by fsuid on multiuser mounts */
3027 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
3028 fsuid_only = false;
3029
3030 - spin_lock(&cifs_file_list_lock);
3031 + spin_lock(&tcon->open_file_lock);
3032 /* we could simply get the first_list_entry since write-only entries
3033 are always at the end of the list but since the first entry might
3034 have a close pending, we go through the whole list */
3035 @@ -1736,8 +1744,8 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
3036 if (!open_file->invalidHandle) {
3037 /* found a good file */
3038 /* lock it so it will not be closed on us */
3039 - cifsFileInfo_get_locked(open_file);
3040 - spin_unlock(&cifs_file_list_lock);
3041 + cifsFileInfo_get(open_file);
3042 + spin_unlock(&tcon->open_file_lock);
3043 return open_file;
3044 } /* else might as well continue, and look for
3045 another, or simply have the caller reopen it
3046 @@ -1745,7 +1753,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
3047 } else /* write only file */
3048 break; /* write only files are last so must be done */
3049 }
3050 - spin_unlock(&cifs_file_list_lock);
3051 + spin_unlock(&tcon->open_file_lock);
3052 return NULL;
3053 }
3054
3055 @@ -1754,6 +1762,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
3056 {
3057 struct cifsFileInfo *open_file, *inv_file = NULL;
3058 struct cifs_sb_info *cifs_sb;
3059 + struct cifs_tcon *tcon;
3060 bool any_available = false;
3061 int rc;
3062 unsigned int refind = 0;
3063 @@ -1769,15 +1778,16 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
3064 }
3065
3066 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
3067 + tcon = cifs_sb_master_tcon(cifs_sb);
3068
3069 /* only filter by fsuid on multiuser mounts */
3070 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
3071 fsuid_only = false;
3072
3073 - spin_lock(&cifs_file_list_lock);
3074 + spin_lock(&tcon->open_file_lock);
3075 refind_writable:
3076 if (refind > MAX_REOPEN_ATT) {
3077 - spin_unlock(&cifs_file_list_lock);
3078 + spin_unlock(&tcon->open_file_lock);
3079 return NULL;
3080 }
3081 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3082 @@ -1788,8 +1798,8 @@ refind_writable:
3083 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3084 if (!open_file->invalidHandle) {
3085 /* found a good writable file */
3086 - cifsFileInfo_get_locked(open_file);
3087 - spin_unlock(&cifs_file_list_lock);
3088 + cifsFileInfo_get(open_file);
3089 + spin_unlock(&tcon->open_file_lock);
3090 return open_file;
3091 } else {
3092 if (!inv_file)
3093 @@ -1805,24 +1815,24 @@ refind_writable:
3094
3095 if (inv_file) {
3096 any_available = false;
3097 - cifsFileInfo_get_locked(inv_file);
3098 + cifsFileInfo_get(inv_file);
3099 }
3100
3101 - spin_unlock(&cifs_file_list_lock);
3102 + spin_unlock(&tcon->open_file_lock);
3103
3104 if (inv_file) {
3105 rc = cifs_reopen_file(inv_file, false);
3106 if (!rc)
3107 return inv_file;
3108 else {
3109 - spin_lock(&cifs_file_list_lock);
3110 + spin_lock(&tcon->open_file_lock);
3111 list_move_tail(&inv_file->flist,
3112 &cifs_inode->openFileList);
3113 - spin_unlock(&cifs_file_list_lock);
3114 + spin_unlock(&tcon->open_file_lock);
3115 cifsFileInfo_put(inv_file);
3116 - spin_lock(&cifs_file_list_lock);
3117 ++refind;
3118 inv_file = NULL;
3119 + spin_lock(&tcon->open_file_lock);
3120 goto refind_writable;
3121 }
3122 }
3123 @@ -3632,15 +3642,17 @@ static int cifs_readpage(struct file *file, struct page *page)
3124 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3125 {
3126 struct cifsFileInfo *open_file;
3127 + struct cifs_tcon *tcon =
3128 + cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
3129
3130 - spin_lock(&cifs_file_list_lock);
3131 + spin_lock(&tcon->open_file_lock);
3132 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3133 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3134 - spin_unlock(&cifs_file_list_lock);
3135 + spin_unlock(&tcon->open_file_lock);
3136 return 1;
3137 }
3138 }
3139 - spin_unlock(&cifs_file_list_lock);
3140 + spin_unlock(&tcon->open_file_lock);
3141 return 0;
3142 }
3143
3144 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
3145 index 8442b8b8e0be..2396ab099849 100644
3146 --- a/fs/cifs/misc.c
3147 +++ b/fs/cifs/misc.c
3148 @@ -120,6 +120,7 @@ tconInfoAlloc(void)
3149 ++ret_buf->tc_count;
3150 INIT_LIST_HEAD(&ret_buf->openFileList);
3151 INIT_LIST_HEAD(&ret_buf->tcon_list);
3152 + spin_lock_init(&ret_buf->open_file_lock);
3153 #ifdef CONFIG_CIFS_STATS
3154 spin_lock_init(&ret_buf->stat_lock);
3155 #endif
3156 @@ -465,7 +466,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
3157 continue;
3158
3159 cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
3160 - spin_lock(&cifs_file_list_lock);
3161 + spin_lock(&tcon->open_file_lock);
3162 list_for_each(tmp2, &tcon->openFileList) {
3163 netfile = list_entry(tmp2, struct cifsFileInfo,
3164 tlist);
3165 @@ -495,11 +496,11 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
3166 &netfile->oplock_break);
3167 netfile->oplock_break_cancelled = false;
3168
3169 - spin_unlock(&cifs_file_list_lock);
3170 + spin_unlock(&tcon->open_file_lock);
3171 spin_unlock(&cifs_tcp_ses_lock);
3172 return true;
3173 }
3174 - spin_unlock(&cifs_file_list_lock);
3175 + spin_unlock(&tcon->open_file_lock);
3176 spin_unlock(&cifs_tcp_ses_lock);
3177 cifs_dbg(FYI, "No matching file for oplock break\n");
3178 return true;
3179 @@ -613,9 +614,9 @@ backup_cred(struct cifs_sb_info *cifs_sb)
3180 void
3181 cifs_del_pending_open(struct cifs_pending_open *open)
3182 {
3183 - spin_lock(&cifs_file_list_lock);
3184 + spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
3185 list_del(&open->olist);
3186 - spin_unlock(&cifs_file_list_lock);
3187 + spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
3188 }
3189
3190 void
3191 @@ -635,7 +636,7 @@ void
3192 cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
3193 struct cifs_pending_open *open)
3194 {
3195 - spin_lock(&cifs_file_list_lock);
3196 + spin_lock(&tlink_tcon(tlink)->open_file_lock);
3197 cifs_add_pending_open_locked(fid, tlink, open);
3198 - spin_unlock(&cifs_file_list_lock);
3199 + spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
3200 }
3201 diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
3202 index b30a4a6d98a0..833e5844a2db 100644
3203 --- a/fs/cifs/readdir.c
3204 +++ b/fs/cifs/readdir.c
3205 @@ -594,14 +594,14 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
3206 is_dir_changed(file)) || (index_to_find < first_entry_in_buffer)) {
3207 /* close and restart search */
3208 cifs_dbg(FYI, "search backing up - close and restart search\n");
3209 - spin_lock(&cifs_file_list_lock);
3210 + spin_lock(&cfile->file_info_lock);
3211 if (server->ops->dir_needs_close(cfile)) {
3212 cfile->invalidHandle = true;
3213 - spin_unlock(&cifs_file_list_lock);
3214 + spin_unlock(&cfile->file_info_lock);
3215 if (server->ops->close_dir)
3216 server->ops->close_dir(xid, tcon, &cfile->fid);
3217 } else
3218 - spin_unlock(&cifs_file_list_lock);
3219 + spin_unlock(&cfile->file_info_lock);
3220 if (cfile->srch_inf.ntwrk_buf_start) {
3221 cifs_dbg(FYI, "freeing SMB ff cache buf on search rewind\n");
3222 if (cfile->srch_inf.smallBuf)
3223 diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
3224 index 0ffa18094335..238759c146ba 100644
3225 --- a/fs/cifs/smb2glob.h
3226 +++ b/fs/cifs/smb2glob.h
3227 @@ -61,4 +61,14 @@
3228 /* Maximum buffer size value we can send with 1 credit */
3229 #define SMB2_MAX_BUFFER_SIZE 65536
3230
3231 +/*
3232 + * Maximum number of credits to keep available.
3233 + * This value is chosen somewhat arbitrarily. The Windows client
3234 + * defaults to 128 credits, the Windows server allows clients up to
3235 + * 512 credits, and the NetApp server does not limit clients at all.
3236 + * Choose a high enough value such that the client shouldn't limit
3237 + * performance.
3238 + */
3239 +#define SMB2_MAX_CREDITS_AVAILABLE 32000
3240 +
3241 #endif /* _SMB2_GLOB_H */
3242 diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
3243 index 4f0231e685a9..1238cd3552f9 100644
3244 --- a/fs/cifs/smb2inode.c
3245 +++ b/fs/cifs/smb2inode.c
3246 @@ -266,9 +266,15 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
3247 struct tcon_link *tlink;
3248 int rc;
3249
3250 + if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
3251 + (buf->LastWriteTime == 0) && (buf->ChangeTime) &&
3252 + (buf->Attributes == 0))
3253 + return 0; /* would be a no op, no sense sending this */
3254 +
3255 tlink = cifs_sb_tlink(cifs_sb);
3256 if (IS_ERR(tlink))
3257 return PTR_ERR(tlink);
3258 +
3259 rc = smb2_open_op_close(xid, tlink_tcon(tlink), cifs_sb, full_path,
3260 FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, buf,
3261 SMB2_OP_SET_INFO);
3262 diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
3263 index 1c5907019045..e5bc85e49be7 100644
3264 --- a/fs/cifs/smb2misc.c
3265 +++ b/fs/cifs/smb2misc.c
3266 @@ -525,19 +525,19 @@ smb2_is_valid_lease_break(char *buffer)
3267 list_for_each(tmp1, &server->smb_ses_list) {
3268 ses = list_entry(tmp1, struct cifs_ses, smb_ses_list);
3269
3270 - spin_lock(&cifs_file_list_lock);
3271 list_for_each(tmp2, &ses->tcon_list) {
3272 tcon = list_entry(tmp2, struct cifs_tcon,
3273 tcon_list);
3274 + spin_lock(&tcon->open_file_lock);
3275 cifs_stats_inc(
3276 &tcon->stats.cifs_stats.num_oplock_brks);
3277 if (smb2_tcon_has_lease(tcon, rsp, lw)) {
3278 - spin_unlock(&cifs_file_list_lock);
3279 + spin_unlock(&tcon->open_file_lock);
3280 spin_unlock(&cifs_tcp_ses_lock);
3281 return true;
3282 }
3283 + spin_unlock(&tcon->open_file_lock);
3284 }
3285 - spin_unlock(&cifs_file_list_lock);
3286 }
3287 }
3288 spin_unlock(&cifs_tcp_ses_lock);
3289 @@ -579,7 +579,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
3290 tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
3291
3292 cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
3293 - spin_lock(&cifs_file_list_lock);
3294 + spin_lock(&tcon->open_file_lock);
3295 list_for_each(tmp2, &tcon->openFileList) {
3296 cfile = list_entry(tmp2, struct cifsFileInfo,
3297 tlist);
3298 @@ -591,7 +591,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
3299
3300 cifs_dbg(FYI, "file id match, oplock break\n");
3301 cinode = CIFS_I(d_inode(cfile->dentry));
3302 -
3303 + spin_lock(&cfile->file_info_lock);
3304 if (!CIFS_CACHE_WRITE(cinode) &&
3305 rsp->OplockLevel == SMB2_OPLOCK_LEVEL_NONE)
3306 cfile->oplock_break_cancelled = true;
3307 @@ -613,14 +613,14 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
3308 clear_bit(
3309 CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
3310 &cinode->flags);
3311 -
3312 + spin_unlock(&cfile->file_info_lock);
3313 queue_work(cifsiod_wq, &cfile->oplock_break);
3314
3315 - spin_unlock(&cifs_file_list_lock);
3316 + spin_unlock(&tcon->open_file_lock);
3317 spin_unlock(&cifs_tcp_ses_lock);
3318 return true;
3319 }
3320 - spin_unlock(&cifs_file_list_lock);
3321 + spin_unlock(&tcon->open_file_lock);
3322 spin_unlock(&cifs_tcp_ses_lock);
3323 cifs_dbg(FYI, "No matching file for oplock break\n");
3324 return true;
3325 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3326 index dd8543caa56e..be34b4860675 100644
3327 --- a/fs/cifs/smb2ops.c
3328 +++ b/fs/cifs/smb2ops.c
3329 @@ -282,7 +282,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
3330 cifs_dbg(FYI, "Link Speed %lld\n",
3331 le64_to_cpu(out_buf->LinkSpeed));
3332 }
3333 -
3334 + kfree(out_buf);
3335 return rc;
3336 }
3337 #endif /* STATS2 */
3338 @@ -536,6 +536,7 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
3339 server->ops->set_oplock_level(cinode, oplock, fid->epoch,
3340 &fid->purge_cache);
3341 cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
3342 + memcpy(cfile->fid.create_guid, fid->create_guid, 16);
3343 }
3344
3345 static void
3346 @@ -694,6 +695,7 @@ smb2_clone_range(const unsigned int xid,
3347
3348 cchunk_out:
3349 kfree(pcchunk);
3350 + kfree(retbuf);
3351 return rc;
3352 }
3353
3354 @@ -818,7 +820,6 @@ smb2_duplicate_extents(const unsigned int xid,
3355 {
3356 int rc;
3357 unsigned int ret_data_len;
3358 - char *retbuf = NULL;
3359 struct duplicate_extents_to_file dup_ext_buf;
3360 struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
3361
3362 @@ -844,7 +845,7 @@ smb2_duplicate_extents(const unsigned int xid,
3363 FSCTL_DUPLICATE_EXTENTS_TO_FILE,
3364 true /* is_fsctl */, (char *)&dup_ext_buf,
3365 sizeof(struct duplicate_extents_to_file),
3366 - (char **)&retbuf,
3367 + NULL,
3368 &ret_data_len);
3369
3370 if (ret_data_len > 0)
3371 @@ -867,7 +868,6 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
3372 struct cifsFileInfo *cfile)
3373 {
3374 struct fsctl_set_integrity_information_req integr_info;
3375 - char *retbuf = NULL;
3376 unsigned int ret_data_len;
3377
3378 integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
3379 @@ -879,7 +879,7 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
3380 FSCTL_SET_INTEGRITY_INFORMATION,
3381 true /* is_fsctl */, (char *)&integr_info,
3382 sizeof(struct fsctl_set_integrity_information_req),
3383 - (char **)&retbuf,
3384 + NULL,
3385 &ret_data_len);
3386
3387 }
3388 @@ -1036,7 +1036,7 @@ smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
3389 static void
3390 smb2_new_lease_key(struct cifs_fid *fid)
3391 {
3392 - get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE);
3393 + generate_random_uuid(fid->lease_key);
3394 }
3395
3396 #define SMB2_SYMLINK_STRUCT_SIZE \
3397 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3398 index 0b6dc1942bdc..0dbbdf5e4aee 100644
3399 --- a/fs/cifs/smb2pdu.c
3400 +++ b/fs/cifs/smb2pdu.c
3401 @@ -103,7 +103,21 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
3402 hdr->ProtocolId[3] = 'B';
3403 hdr->StructureSize = cpu_to_le16(64);
3404 hdr->Command = smb2_cmd;
3405 - hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
3406 + if (tcon && tcon->ses && tcon->ses->server) {
3407 + struct TCP_Server_Info *server = tcon->ses->server;
3408 +
3409 + spin_lock(&server->req_lock);
3410 + /* Request up to 2 credits but don't go over the limit. */
3411 + if (server->credits >= SMB2_MAX_CREDITS_AVAILABLE)
3412 + hdr->CreditRequest = cpu_to_le16(0);
3413 + else
3414 + hdr->CreditRequest = cpu_to_le16(
3415 + min_t(int, SMB2_MAX_CREDITS_AVAILABLE -
3416 + server->credits, 2));
3417 + spin_unlock(&server->req_lock);
3418 + } else {
3419 + hdr->CreditRequest = cpu_to_le16(2);
3420 + }
3421 hdr->ProcessId = cpu_to_le32((__u16)current->tgid);
3422
3423 if (!tcon)
3424 @@ -593,6 +607,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
3425 char *security_blob = NULL;
3426 unsigned char *ntlmssp_blob = NULL;
3427 bool use_spnego = false; /* else use raw ntlmssp */
3428 + u64 previous_session = ses->Suid;
3429
3430 cifs_dbg(FYI, "Session Setup\n");
3431
3432 @@ -630,6 +645,10 @@ ssetup_ntlmssp_authenticate:
3433 return rc;
3434
3435 req->hdr.SessionId = 0; /* First session, not a reauthenticate */
3436 +
3437 + /* if reconnect, we need to send previous sess id, otherwise it is 0 */
3438 + req->PreviousSessionId = previous_session;
3439 +
3440 req->Flags = 0; /* MBZ */
3441 /* to enable echos and oplocks */
3442 req->hdr.CreditRequest = cpu_to_le16(3);
3443 @@ -1167,7 +1186,7 @@ create_durable_v2_buf(struct cifs_fid *pfid)
3444
3445 buf->dcontext.Timeout = 0; /* Should this be configurable by workload */
3446 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
3447 - get_random_bytes(buf->dcontext.CreateGuid, 16);
3448 + generate_random_uuid(buf->dcontext.CreateGuid);
3449 memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
3450
3451 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
3452 @@ -2059,6 +2078,7 @@ smb2_async_readv(struct cifs_readdata *rdata)
3453 if (rdata->credits) {
3454 buf->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
3455 SMB2_MAX_BUFFER_SIZE));
3456 + buf->CreditRequest = buf->CreditCharge;
3457 spin_lock(&server->req_lock);
3458 server->credits += rdata->credits -
3459 le16_to_cpu(buf->CreditCharge);
3460 @@ -2245,6 +2265,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
3461 if (wdata->credits) {
3462 req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
3463 SMB2_MAX_BUFFER_SIZE));
3464 + req->hdr.CreditRequest = req->hdr.CreditCharge;
3465 spin_lock(&server->req_lock);
3466 server->credits += wdata->credits -
3467 le16_to_cpu(req->hdr.CreditCharge);
3468 diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
3469 index 4af52780ec35..b8f553b32dda 100644
3470 --- a/fs/cifs/smb2pdu.h
3471 +++ b/fs/cifs/smb2pdu.h
3472 @@ -276,7 +276,7 @@ struct smb2_sess_setup_req {
3473 __le32 Channel;
3474 __le16 SecurityBufferOffset;
3475 __le16 SecurityBufferLength;
3476 - __le64 PreviousSessionId;
3477 + __u64 PreviousSessionId;
3478 __u8 Buffer[1]; /* variable length GSS security buffer */
3479 } __packed;
3480
3481 diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
3482 index 1420a3c614af..5d09ea585840 100644
3483 --- a/fs/ext4/sysfs.c
3484 +++ b/fs/ext4/sysfs.c
3485 @@ -223,14 +223,18 @@ static struct attribute *ext4_attrs[] = {
3486 EXT4_ATTR_FEATURE(lazy_itable_init);
3487 EXT4_ATTR_FEATURE(batched_discard);
3488 EXT4_ATTR_FEATURE(meta_bg_resize);
3489 +#ifdef CONFIG_EXT4_FS_ENCRYPTION
3490 EXT4_ATTR_FEATURE(encryption);
3491 +#endif
3492 EXT4_ATTR_FEATURE(metadata_csum_seed);
3493
3494 static struct attribute *ext4_feat_attrs[] = {
3495 ATTR_LIST(lazy_itable_init),
3496 ATTR_LIST(batched_discard),
3497 ATTR_LIST(meta_bg_resize),
3498 +#ifdef CONFIG_EXT4_FS_ENCRYPTION
3499 ATTR_LIST(encryption),
3500 +#endif
3501 ATTR_LIST(metadata_csum_seed),
3502 NULL,
3503 };
3504 diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
3505 index d67a16f2a45d..350f67fb5b9c 100644
3506 --- a/fs/isofs/inode.c
3507 +++ b/fs/isofs/inode.c
3508 @@ -690,6 +690,11 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
3509 pri_bh = NULL;
3510
3511 root_found:
3512 + /* We don't support read-write mounts */
3513 + if (!(s->s_flags & MS_RDONLY)) {
3514 + error = -EACCES;
3515 + goto out_freebh;
3516 + }
3517
3518 if (joliet_level && (pri == NULL || !opt.rock)) {
3519 /* This is the case of Joliet with the norock mount flag.
3520 @@ -1503,9 +1508,6 @@ struct inode *__isofs_iget(struct super_block *sb,
3521 static struct dentry *isofs_mount(struct file_system_type *fs_type,
3522 int flags, const char *dev_name, void *data)
3523 {
3524 - /* We don't support read-write mounts */
3525 - if (!(flags & MS_RDONLY))
3526 - return ERR_PTR(-EACCES);
3527 return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
3528 }
3529
3530 diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
3531 index ca181e81c765..fa1b8e0dcacf 100644
3532 --- a/fs/jbd2/transaction.c
3533 +++ b/fs/jbd2/transaction.c
3534 @@ -1156,6 +1156,7 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
3535 JBUFFER_TRACE(jh, "file as BJ_Reserved");
3536 spin_lock(&journal->j_list_lock);
3537 __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
3538 + spin_unlock(&journal->j_list_lock);
3539 } else if (jh->b_transaction == journal->j_committing_transaction) {
3540 /* first access by this transaction */
3541 jh->b_modified = 0;
3542 @@ -1163,8 +1164,8 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
3543 JBUFFER_TRACE(jh, "set next transaction");
3544 spin_lock(&journal->j_list_lock);
3545 jh->b_next_transaction = transaction;
3546 + spin_unlock(&journal->j_list_lock);
3547 }
3548 - spin_unlock(&journal->j_list_lock);
3549 jbd_unlock_bh_state(bh);
3550
3551 /*
3552 diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
3553 index 5166adcfc0fb..7af5eeabc80e 100644
3554 --- a/fs/nfs/delegation.c
3555 +++ b/fs/nfs/delegation.c
3556 @@ -41,6 +41,17 @@ void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
3557 set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
3558 }
3559
3560 +static bool
3561 +nfs4_is_valid_delegation(const struct nfs_delegation *delegation,
3562 + fmode_t flags)
3563 +{
3564 + if (delegation != NULL && (delegation->type & flags) == flags &&
3565 + !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) &&
3566 + !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
3567 + return true;
3568 + return false;
3569 +}
3570 +
3571 static int
3572 nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
3573 {
3574 @@ -50,8 +61,7 @@ nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
3575 flags &= FMODE_READ|FMODE_WRITE;
3576 rcu_read_lock();
3577 delegation = rcu_dereference(NFS_I(inode)->delegation);
3578 - if (delegation != NULL && (delegation->type & flags) == flags &&
3579 - !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
3580 + if (nfs4_is_valid_delegation(delegation, flags)) {
3581 if (mark)
3582 nfs_mark_delegation_referenced(delegation);
3583 ret = 1;
3584 @@ -892,7 +902,7 @@ bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode,
3585 flags &= FMODE_READ|FMODE_WRITE;
3586 rcu_read_lock();
3587 delegation = rcu_dereference(nfsi->delegation);
3588 - ret = (delegation != NULL && (delegation->type & flags) == flags);
3589 + ret = nfs4_is_valid_delegation(delegation, flags);
3590 if (ret) {
3591 nfs4_stateid_copy(dst, &delegation->stateid);
3592 nfs_mark_delegation_referenced(delegation);
3593 diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
3594 index 6b1ce9825430..7f1a0fb8c493 100644
3595 --- a/fs/nfs/nfs42proc.c
3596 +++ b/fs/nfs/nfs42proc.c
3597 @@ -269,6 +269,7 @@ int nfs42_proc_layoutstats_generic(struct nfs_server *server,
3598 task = rpc_run_task(&task_setup);
3599 if (IS_ERR(task))
3600 return PTR_ERR(task);
3601 + rpc_put_task(task);
3602 return 0;
3603 }
3604
3605 diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
3606 index d854693a15b0..82dc3035ea45 100644
3607 --- a/fs/nfs/nfs4state.c
3608 +++ b/fs/nfs/nfs4state.c
3609 @@ -1493,6 +1493,9 @@ restart:
3610 __func__, status);
3611 case -ENOENT:
3612 case -ENOMEM:
3613 + case -EACCES:
3614 + case -EROFS:
3615 + case -EIO:
3616 case -ESTALE:
3617 /* Open state on this file cannot be recovered */
3618 nfs4_state_mark_recovery_failed(state, status);
3619 diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
3620 index 9e52609cd683..ea0dd9ee138d 100644
3621 --- a/fs/overlayfs/copy_up.c
3622 +++ b/fs/overlayfs/copy_up.c
3623 @@ -25,6 +25,7 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
3624 ssize_t list_size, size, value_size = 0;
3625 char *buf, *name, *value = NULL;
3626 int uninitialized_var(error);
3627 + size_t slen;
3628
3629 if (!old->d_inode->i_op->getxattr ||
3630 !new->d_inode->i_op->getxattr)
3631 @@ -47,7 +48,16 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
3632 goto out;
3633 }
3634
3635 - for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
3636 + for (name = buf; list_size; name += slen) {
3637 + slen = strnlen(name, list_size) + 1;
3638 +
3639 + /* underlying fs providing us with an broken xattr list? */
3640 + if (WARN_ON(slen > list_size)) {
3641 + error = -EIO;
3642 + break;
3643 + }
3644 + list_size -= slen;
3645 +
3646 if (ovl_is_private_xattr(name))
3647 continue;
3648 retry:
3649 diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
3650 index ba5ef733951f..327177df03a5 100644
3651 --- a/fs/overlayfs/dir.c
3652 +++ b/fs/overlayfs/dir.c
3653 @@ -12,6 +12,7 @@
3654 #include <linux/xattr.h>
3655 #include <linux/security.h>
3656 #include <linux/cred.h>
3657 +#include <linux/atomic.h>
3658 #include "overlayfs.h"
3659
3660 void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
3661 @@ -35,8 +36,10 @@ struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry)
3662 {
3663 struct dentry *temp;
3664 char name[20];
3665 + static atomic_t temp_id = ATOMIC_INIT(0);
3666
3667 - snprintf(name, sizeof(name), "#%lx", (unsigned long) dentry);
3668 + /* counter is allowed to wrap, since temp dentries are ephemeral */
3669 + snprintf(name, sizeof(name), "#%x", atomic_inc_return(&temp_id));
3670
3671 temp = lookup_one_len(name, workdir, strlen(name));
3672 if (!IS_ERR(temp) && temp->d_inode) {
3673 diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
3674 index 319c3a60cfa5..905caba36529 100644
3675 --- a/fs/pstore/ram.c
3676 +++ b/fs/pstore/ram.c
3677 @@ -375,13 +375,14 @@ static void ramoops_free_przs(struct ramoops_context *cxt)
3678 {
3679 int i;
3680
3681 - cxt->max_dump_cnt = 0;
3682 if (!cxt->przs)
3683 return;
3684
3685 - for (i = 0; !IS_ERR_OR_NULL(cxt->przs[i]); i++)
3686 + for (i = 0; i < cxt->max_dump_cnt; i++)
3687 persistent_ram_free(cxt->przs[i]);
3688 +
3689 kfree(cxt->przs);
3690 + cxt->max_dump_cnt = 0;
3691 }
3692
3693 static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
3694 @@ -406,7 +407,7 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
3695 GFP_KERNEL);
3696 if (!cxt->przs) {
3697 dev_err(dev, "failed to initialize a prz array for dumps\n");
3698 - goto fail_prz;
3699 + goto fail_mem;
3700 }
3701
3702 for (i = 0; i < cxt->max_dump_cnt; i++) {
3703 @@ -417,6 +418,11 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
3704 err = PTR_ERR(cxt->przs[i]);
3705 dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
3706 cxt->record_size, (unsigned long long)*paddr, err);
3707 +
3708 + while (i > 0) {
3709 + i--;
3710 + persistent_ram_free(cxt->przs[i]);
3711 + }
3712 goto fail_prz;
3713 }
3714 *paddr += cxt->record_size;
3715 @@ -424,7 +430,9 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
3716
3717 return 0;
3718 fail_prz:
3719 - ramoops_free_przs(cxt);
3720 + kfree(cxt->przs);
3721 +fail_mem:
3722 + cxt->max_dump_cnt = 0;
3723 return err;
3724 }
3725
3726 @@ -583,7 +591,6 @@ static int ramoops_remove(struct platform_device *pdev)
3727 struct ramoops_context *cxt = &oops_cxt;
3728
3729 pstore_unregister(&cxt->pstore);
3730 - cxt->max_dump_cnt = 0;
3731
3732 kfree(cxt->pstore.buf);
3733 cxt->pstore.bufsize = 0;
3734 diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
3735 index 76c3f80efdfa..364d2dffe5a6 100644
3736 --- a/fs/pstore/ram_core.c
3737 +++ b/fs/pstore/ram_core.c
3738 @@ -47,43 +47,10 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz)
3739 return atomic_read(&prz->buffer->start);
3740 }
3741
3742 -/* increase and wrap the start pointer, returning the old value */
3743 -static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a)
3744 -{
3745 - int old;
3746 - int new;
3747 -
3748 - do {
3749 - old = atomic_read(&prz->buffer->start);
3750 - new = old + a;
3751 - while (unlikely(new >= prz->buffer_size))
3752 - new -= prz->buffer_size;
3753 - } while (atomic_cmpxchg(&prz->buffer->start, old, new) != old);
3754 -
3755 - return old;
3756 -}
3757 -
3758 -/* increase the size counter until it hits the max size */
3759 -static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
3760 -{
3761 - size_t old;
3762 - size_t new;
3763 -
3764 - if (atomic_read(&prz->buffer->size) == prz->buffer_size)
3765 - return;
3766 -
3767 - do {
3768 - old = atomic_read(&prz->buffer->size);
3769 - new = old + a;
3770 - if (new > prz->buffer_size)
3771 - new = prz->buffer_size;
3772 - } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
3773 -}
3774 -
3775 static DEFINE_RAW_SPINLOCK(buffer_lock);
3776
3777 /* increase and wrap the start pointer, returning the old value */
3778 -static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
3779 +static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
3780 {
3781 int old;
3782 int new;
3783 @@ -103,7 +70,7 @@ static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
3784 }
3785
3786 /* increase the size counter until it hits the max size */
3787 -static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
3788 +static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
3789 {
3790 size_t old;
3791 size_t new;
3792 @@ -124,9 +91,6 @@ exit:
3793 raw_spin_unlock_irqrestore(&buffer_lock, flags);
3794 }
3795
3796 -static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
3797 -static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic;
3798 -
3799 static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
3800 uint8_t *data, size_t len, uint8_t *ecc)
3801 {
3802 @@ -299,7 +263,7 @@ static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
3803 const void *s, unsigned int start, unsigned int count)
3804 {
3805 struct persistent_ram_buffer *buffer = prz->buffer;
3806 - memcpy(buffer->data + start, s, count);
3807 + memcpy_toio(buffer->data + start, s, count);
3808 persistent_ram_update_ecc(prz, start, count);
3809 }
3810
3811 @@ -322,8 +286,8 @@ void persistent_ram_save_old(struct persistent_ram_zone *prz)
3812 }
3813
3814 prz->old_log_size = size;
3815 - memcpy(prz->old_log, &buffer->data[start], size - start);
3816 - memcpy(prz->old_log + size - start, &buffer->data[0], start);
3817 + memcpy_fromio(prz->old_log, &buffer->data[start], size - start);
3818 + memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start);
3819 }
3820
3821 int notrace persistent_ram_write(struct persistent_ram_zone *prz,
3822 @@ -426,9 +390,6 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size,
3823 return NULL;
3824 }
3825
3826 - buffer_start_add = buffer_start_add_locked;
3827 - buffer_size_add = buffer_size_add_locked;
3828 -
3829 if (memtype)
3830 va = ioremap(start, size);
3831 else
3832 diff --git a/fs/super.c b/fs/super.c
3833 index f5f4b328f860..d4d2591b77c8 100644
3834 --- a/fs/super.c
3835 +++ b/fs/super.c
3836 @@ -1326,8 +1326,8 @@ int freeze_super(struct super_block *sb)
3837 }
3838 }
3839 /*
3840 - * This is just for debugging purposes so that fs can warn if it
3841 - * sees write activity when frozen is set to SB_FREEZE_COMPLETE.
3842 + * For debugging purposes so that fs can warn if it sees write activity
3843 + * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
3844 */
3845 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
3846 up_write(&sb->s_umount);
3847 @@ -1346,7 +1346,7 @@ int thaw_super(struct super_block *sb)
3848 int error;
3849
3850 down_write(&sb->s_umount);
3851 - if (sb->s_writers.frozen == SB_UNFROZEN) {
3852 + if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
3853 up_write(&sb->s_umount);
3854 return -EINVAL;
3855 }
3856 diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
3857 index e8b01b721e99..b5bf23b34241 100644
3858 --- a/fs/ubifs/xattr.c
3859 +++ b/fs/ubifs/xattr.c
3860 @@ -173,6 +173,7 @@ out_cancel:
3861 host_ui->xattr_cnt -= 1;
3862 host_ui->xattr_size -= CALC_DENT_SIZE(nm->len);
3863 host_ui->xattr_size -= CALC_XATTR_BYTES(size);
3864 + host_ui->xattr_names -= nm->len;
3865 mutex_unlock(&host_ui->ui_mutex);
3866 out_free:
3867 make_bad_inode(inode);
3868 @@ -533,6 +534,7 @@ out_cancel:
3869 host_ui->xattr_cnt += 1;
3870 host_ui->xattr_size += CALC_DENT_SIZE(nm->len);
3871 host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
3872 + host_ui->xattr_names += nm->len;
3873 mutex_unlock(&host_ui->ui_mutex);
3874 ubifs_release_budget(c, &req);
3875 make_bad_inode(inode);
3876 diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
3877 index 0a83a1e648b0..4db00b02ca3f 100644
3878 --- a/include/linux/devfreq-event.h
3879 +++ b/include/linux/devfreq-event.h
3880 @@ -148,11 +148,6 @@ static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
3881 return -EINVAL;
3882 }
3883
3884 -static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
3885 -{
3886 - return ERR_PTR(-EINVAL);
3887 -}
3888 -
3889 static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
3890 struct device *dev, int index)
3891 {
3892 diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
3893 index e98425058f20..54048f336a1f 100644
3894 --- a/include/linux/irqchip/arm-gic-v3.h
3895 +++ b/include/linux/irqchip/arm-gic-v3.h
3896 @@ -218,7 +218,7 @@
3897 #define GITS_BASER_TYPE_SHIFT (56)
3898 #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
3899 #define GITS_BASER_ENTRY_SIZE_SHIFT (48)
3900 -#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
3901 +#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
3902 #define GITS_BASER_NonShareable (0UL << 10)
3903 #define GITS_BASER_InnerShareable (1UL << 10)
3904 #define GITS_BASER_OuterShareable (2UL << 10)
3905 diff --git a/include/linux/kernel.h b/include/linux/kernel.h
3906 index e571e592e53a..50220cab738c 100644
3907 --- a/include/linux/kernel.h
3908 +++ b/include/linux/kernel.h
3909 @@ -356,6 +356,7 @@ int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
3910 int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
3911 int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
3912 int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
3913 +int __must_check kstrtobool(const char *s, bool *res);
3914
3915 int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
3916 int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
3917 @@ -367,6 +368,7 @@ int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigne
3918 int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
3919 int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
3920 int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
3921 +int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
3922
3923 static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
3924 {
3925 diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
3926 index f09648d14694..782d4e814e21 100644
3927 --- a/include/linux/lightnvm.h
3928 +++ b/include/linux/lightnvm.h
3929 @@ -1,6 +1,8 @@
3930 #ifndef NVM_H
3931 #define NVM_H
3932
3933 +#include <linux/types.h>
3934 +
3935 enum {
3936 NVM_IO_OK = 0,
3937 NVM_IO_REQUEUE = 1,
3938 @@ -11,10 +13,71 @@ enum {
3939 NVM_IOTYPE_GC = 1,
3940 };
3941
3942 +#define NVM_BLK_BITS (16)
3943 +#define NVM_PG_BITS (16)
3944 +#define NVM_SEC_BITS (8)
3945 +#define NVM_PL_BITS (8)
3946 +#define NVM_LUN_BITS (8)
3947 +#define NVM_CH_BITS (8)
3948 +
3949 +struct ppa_addr {
3950 + /* Generic structure for all addresses */
3951 + union {
3952 + struct {
3953 + u64 blk : NVM_BLK_BITS;
3954 + u64 pg : NVM_PG_BITS;
3955 + u64 sec : NVM_SEC_BITS;
3956 + u64 pl : NVM_PL_BITS;
3957 + u64 lun : NVM_LUN_BITS;
3958 + u64 ch : NVM_CH_BITS;
3959 + } g;
3960 +
3961 + u64 ppa;
3962 + };
3963 +};
3964 +
3965 +struct nvm_rq;
3966 +struct nvm_id;
3967 +struct nvm_dev;
3968 +
3969 +typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
3970 +typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
3971 +typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
3972 +typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
3973 + nvm_l2p_update_fn *, void *);
3974 +typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
3975 + nvm_bb_update_fn *, void *);
3976 +typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
3977 +typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
3978 +typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
3979 +typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
3980 +typedef void (nvm_destroy_dma_pool_fn)(void *);
3981 +typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
3982 + dma_addr_t *);
3983 +typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
3984 +
3985 +struct nvm_dev_ops {
3986 + nvm_id_fn *identity;
3987 + nvm_get_l2p_tbl_fn *get_l2p_tbl;
3988 + nvm_op_bb_tbl_fn *get_bb_tbl;
3989 + nvm_op_set_bb_fn *set_bb_tbl;
3990 +
3991 + nvm_submit_io_fn *submit_io;
3992 + nvm_erase_blk_fn *erase_block;
3993 +
3994 + nvm_create_dma_pool_fn *create_dma_pool;
3995 + nvm_destroy_dma_pool_fn *destroy_dma_pool;
3996 + nvm_dev_dma_alloc_fn *dev_dma_alloc;
3997 + nvm_dev_dma_free_fn *dev_dma_free;
3998 +
3999 + unsigned int max_phys_sect;
4000 +};
4001 +
4002 +
4003 +
4004 #ifdef CONFIG_NVM
4005
4006 #include <linux/blkdev.h>
4007 -#include <linux/types.h>
4008 #include <linux/file.h>
4009 #include <linux/dmapool.h>
4010
4011 @@ -126,29 +189,6 @@ struct nvm_tgt_instance {
4012 #define NVM_VERSION_MINOR 0
4013 #define NVM_VERSION_PATCH 0
4014
4015 -#define NVM_BLK_BITS (16)
4016 -#define NVM_PG_BITS (16)
4017 -#define NVM_SEC_BITS (8)
4018 -#define NVM_PL_BITS (8)
4019 -#define NVM_LUN_BITS (8)
4020 -#define NVM_CH_BITS (8)
4021 -
4022 -struct ppa_addr {
4023 - /* Generic structure for all addresses */
4024 - union {
4025 - struct {
4026 - u64 blk : NVM_BLK_BITS;
4027 - u64 pg : NVM_PG_BITS;
4028 - u64 sec : NVM_SEC_BITS;
4029 - u64 pl : NVM_PL_BITS;
4030 - u64 lun : NVM_LUN_BITS;
4031 - u64 ch : NVM_CH_BITS;
4032 - } g;
4033 -
4034 - u64 ppa;
4035 - };
4036 -};
4037 -
4038 struct nvm_rq {
4039 struct nvm_tgt_instance *ins;
4040 struct nvm_dev *dev;
4041 @@ -182,39 +222,6 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
4042
4043 struct nvm_block;
4044
4045 -typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
4046 -typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
4047 -typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
4048 -typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
4049 - nvm_l2p_update_fn *, void *);
4050 -typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
4051 - nvm_bb_update_fn *, void *);
4052 -typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
4053 -typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
4054 -typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
4055 -typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
4056 -typedef void (nvm_destroy_dma_pool_fn)(void *);
4057 -typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
4058 - dma_addr_t *);
4059 -typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
4060 -
4061 -struct nvm_dev_ops {
4062 - nvm_id_fn *identity;
4063 - nvm_get_l2p_tbl_fn *get_l2p_tbl;
4064 - nvm_op_bb_tbl_fn *get_bb_tbl;
4065 - nvm_op_set_bb_fn *set_bb_tbl;
4066 -
4067 - nvm_submit_io_fn *submit_io;
4068 - nvm_erase_blk_fn *erase_block;
4069 -
4070 - nvm_create_dma_pool_fn *create_dma_pool;
4071 - nvm_destroy_dma_pool_fn *destroy_dma_pool;
4072 - nvm_dev_dma_alloc_fn *dev_dma_alloc;
4073 - nvm_dev_dma_free_fn *dev_dma_free;
4074 -
4075 - unsigned int max_phys_sect;
4076 -};
4077 -
4078 struct nvm_lun {
4079 int id;
4080
4081 diff --git a/include/linux/sem.h b/include/linux/sem.h
4082 index 976ce3a19f1b..d0efd6e6c20a 100644
4083 --- a/include/linux/sem.h
4084 +++ b/include/linux/sem.h
4085 @@ -21,6 +21,7 @@ struct sem_array {
4086 struct list_head list_id; /* undo requests on this array */
4087 int sem_nsems; /* no. of semaphores in array */
4088 int complex_count; /* pending complex operations */
4089 + bool complex_mode; /* no parallel simple ops */
4090 };
4091
4092 #ifdef CONFIG_SYSVIPC
4093 diff --git a/include/linux/string.h b/include/linux/string.h
4094 index 9ef7795e65e4..aa30789b0f65 100644
4095 --- a/include/linux/string.h
4096 +++ b/include/linux/string.h
4097 @@ -127,7 +127,11 @@ extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
4098 extern void argv_free(char **argv);
4099
4100 extern bool sysfs_streq(const char *s1, const char *s2);
4101 -extern int strtobool(const char *s, bool *res);
4102 +extern int kstrtobool(const char *s, bool *res);
4103 +static inline int strtobool(const char *s, bool *res)
4104 +{
4105 + return kstrtobool(s, res);
4106 +}
4107
4108 #ifdef CONFIG_BINARY_PRINTF
4109 int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
4110 diff --git a/include/linux/swap.h b/include/linux/swap.h
4111 index 7ba7dccaf0e7..d8ca2eaa3a8b 100644
4112 --- a/include/linux/swap.h
4113 +++ b/include/linux/swap.h
4114 @@ -266,6 +266,7 @@ static inline void workingset_node_pages_inc(struct radix_tree_node *node)
4115
4116 static inline void workingset_node_pages_dec(struct radix_tree_node *node)
4117 {
4118 + VM_WARN_ON_ONCE(!workingset_node_pages(node));
4119 node->count--;
4120 }
4121
4122 @@ -281,6 +282,7 @@ static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
4123
4124 static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
4125 {
4126 + VM_WARN_ON_ONCE(!workingset_node_shadows(node));
4127 node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
4128 }
4129
4130 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
4131 index 59081c73b296..6afc6f388edf 100644
4132 --- a/include/target/target_core_base.h
4133 +++ b/include/target/target_core_base.h
4134 @@ -180,6 +180,7 @@ enum tcm_sense_reason_table {
4135 TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED = R(0x15),
4136 TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16),
4137 TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17),
4138 + TCM_COPY_TARGET_DEVICE_NOT_REACHABLE = R(0x18),
4139 #undef R
4140 };
4141
4142 diff --git a/ipc/sem.c b/ipc/sem.c
4143 index 20d07008ad5e..9862c3d1c26d 100644
4144 --- a/ipc/sem.c
4145 +++ b/ipc/sem.c
4146 @@ -155,14 +155,21 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
4147
4148 /*
4149 * Locking:
4150 + * a) global sem_lock() for read/write
4151 * sem_undo.id_next,
4152 * sem_array.complex_count,
4153 - * sem_array.pending{_alter,_cont},
4154 - * sem_array.sem_undo: global sem_lock() for read/write
4155 - * sem_undo.proc_next: only "current" is allowed to read/write that field.
4156 + * sem_array.complex_mode
4157 + * sem_array.pending{_alter,_const},
4158 + * sem_array.sem_undo
4159 *
4160 + * b) global or semaphore sem_lock() for read/write:
4161 * sem_array.sem_base[i].pending_{const,alter}:
4162 - * global or semaphore sem_lock() for read/write
4163 + * sem_array.complex_mode (for read)
4164 + *
4165 + * c) special:
4166 + * sem_undo_list.list_proc:
4167 + * * undo_list->lock for write
4168 + * * rcu for read
4169 */
4170
4171 #define sc_semmsl sem_ctls[0]
4172 @@ -263,24 +270,25 @@ static void sem_rcu_free(struct rcu_head *head)
4173 #define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb()
4174
4175 /*
4176 - * Wait until all currently ongoing simple ops have completed.
4177 + * Enter the mode suitable for non-simple operations:
4178 * Caller must own sem_perm.lock.
4179 - * New simple ops cannot start, because simple ops first check
4180 - * that sem_perm.lock is free.
4181 - * that a) sem_perm.lock is free and b) complex_count is 0.
4182 */
4183 -static void sem_wait_array(struct sem_array *sma)
4184 +static void complexmode_enter(struct sem_array *sma)
4185 {
4186 int i;
4187 struct sem *sem;
4188
4189 - if (sma->complex_count) {
4190 - /* The thread that increased sma->complex_count waited on
4191 - * all sem->lock locks. Thus we don't need to wait again.
4192 - */
4193 + if (sma->complex_mode) {
4194 + /* We are already in complex_mode. Nothing to do */
4195 return;
4196 }
4197
4198 + /* We need a full barrier after seting complex_mode:
4199 + * The write to complex_mode must be visible
4200 + * before we read the first sem->lock spinlock state.
4201 + */
4202 + smp_store_mb(sma->complex_mode, true);
4203 +
4204 for (i = 0; i < sma->sem_nsems; i++) {
4205 sem = sma->sem_base + i;
4206 spin_unlock_wait(&sem->lock);
4207 @@ -289,6 +297,28 @@ static void sem_wait_array(struct sem_array *sma)
4208 }
4209
4210 /*
4211 + * Try to leave the mode that disallows simple operations:
4212 + * Caller must own sem_perm.lock.
4213 + */
4214 +static void complexmode_tryleave(struct sem_array *sma)
4215 +{
4216 + if (sma->complex_count) {
4217 + /* Complex ops are sleeping.
4218 + * We must stay in complex mode
4219 + */
4220 + return;
4221 + }
4222 + /*
4223 + * Immediately after setting complex_mode to false,
4224 + * a simple op can start. Thus: all memory writes
4225 + * performed by the current operation must be visible
4226 + * before we set complex_mode to false.
4227 + */
4228 + smp_store_release(&sma->complex_mode, false);
4229 +}
4230 +
4231 +#define SEM_GLOBAL_LOCK (-1)
4232 +/*
4233 * If the request contains only one semaphore operation, and there are
4234 * no complex transactions pending, lock only the semaphore involved.
4235 * Otherwise, lock the entire semaphore array, since we either have
4236 @@ -304,56 +334,42 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
4237 /* Complex operation - acquire a full lock */
4238 ipc_lock_object(&sma->sem_perm);
4239
4240 - /* And wait until all simple ops that are processed
4241 - * right now have dropped their locks.
4242 - */
4243 - sem_wait_array(sma);
4244 - return -1;
4245 + /* Prevent parallel simple ops */
4246 + complexmode_enter(sma);
4247 + return SEM_GLOBAL_LOCK;
4248 }
4249
4250 /*
4251 * Only one semaphore affected - try to optimize locking.
4252 - * The rules are:
4253 - * - optimized locking is possible if no complex operation
4254 - * is either enqueued or processed right now.
4255 - * - The test for enqueued complex ops is simple:
4256 - * sma->complex_count != 0
4257 - * - Testing for complex ops that are processed right now is
4258 - * a bit more difficult. Complex ops acquire the full lock
4259 - * and first wait that the running simple ops have completed.
4260 - * (see above)
4261 - * Thus: If we own a simple lock and the global lock is free
4262 - * and complex_count is now 0, then it will stay 0 and
4263 - * thus just locking sem->lock is sufficient.
4264 + * Optimized locking is possible if no complex operation
4265 + * is either enqueued or processed right now.
4266 + *
4267 + * Both facts are tracked by complex_mode.
4268 */
4269 sem = sma->sem_base + sops->sem_num;
4270
4271 - if (sma->complex_count == 0) {
4272 + /*
4273 + * Initial check for complex_mode. Just an optimization,
4274 + * no locking, no memory barrier.
4275 + */
4276 + if (!sma->complex_mode) {
4277 /*
4278 * It appears that no complex operation is around.
4279 * Acquire the per-semaphore lock.
4280 */
4281 spin_lock(&sem->lock);
4282
4283 - /* Then check that the global lock is free */
4284 - if (!spin_is_locked(&sma->sem_perm.lock)) {
4285 - /*
4286 - * We need a memory barrier with acquire semantics,
4287 - * otherwise we can race with another thread that does:
4288 - * complex_count++;
4289 - * spin_unlock(sem_perm.lock);
4290 - */
4291 - ipc_smp_acquire__after_spin_is_unlocked();
4292 + /*
4293 + * See 51d7d5205d33
4294 + * ("powerpc: Add smp_mb() to arch_spin_is_locked()"):
4295 + * A full barrier is required: the write of sem->lock
4296 + * must be visible before the read is executed
4297 + */
4298 + smp_mb();
4299
4300 - /*
4301 - * Now repeat the test of complex_count:
4302 - * It can't change anymore until we drop sem->lock.
4303 - * Thus: if is now 0, then it will stay 0.
4304 - */
4305 - if (sma->complex_count == 0) {
4306 - /* fast path successful! */
4307 - return sops->sem_num;
4308 - }
4309 + if (!smp_load_acquire(&sma->complex_mode)) {
4310 + /* fast path successful! */
4311 + return sops->sem_num;
4312 }
4313 spin_unlock(&sem->lock);
4314 }
4315 @@ -373,15 +389,16 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
4316 /* Not a false alarm, thus complete the sequence for a
4317 * full lock.
4318 */
4319 - sem_wait_array(sma);
4320 - return -1;
4321 + complexmode_enter(sma);
4322 + return SEM_GLOBAL_LOCK;
4323 }
4324 }
4325
4326 static inline void sem_unlock(struct sem_array *sma, int locknum)
4327 {
4328 - if (locknum == -1) {
4329 + if (locknum == SEM_GLOBAL_LOCK) {
4330 unmerge_queues(sma);
4331 + complexmode_tryleave(sma);
4332 ipc_unlock_object(&sma->sem_perm);
4333 } else {
4334 struct sem *sem = sma->sem_base + locknum;
4335 @@ -533,6 +550,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
4336 }
4337
4338 sma->complex_count = 0;
4339 + sma->complex_mode = true; /* dropped by sem_unlock below */
4340 INIT_LIST_HEAD(&sma->pending_alter);
4341 INIT_LIST_HEAD(&sma->pending_const);
4342 INIT_LIST_HEAD(&sma->list_id);
4343 @@ -2186,10 +2204,10 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
4344 /*
4345 * The proc interface isn't aware of sem_lock(), it calls
4346 * ipc_lock_object() directly (in sysvipc_find_ipc).
4347 - * In order to stay compatible with sem_lock(), we must wait until
4348 - * all simple semop() calls have left their critical regions.
4349 + * In order to stay compatible with sem_lock(), we must
4350 + * enter / leave complex_mode.
4351 */
4352 - sem_wait_array(sma);
4353 + complexmode_enter(sma);
4354
4355 sem_otime = get_semotime(sma);
4356
4357 @@ -2206,6 +2224,8 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
4358 sem_otime,
4359 sma->sem_ctime);
4360
4361 + complexmode_tryleave(sma);
4362 +
4363 return 0;
4364 }
4365 #endif
4366 diff --git a/lib/kstrtox.c b/lib/kstrtox.c
4367 index 94be244e8441..d8a5cf66c316 100644
4368 --- a/lib/kstrtox.c
4369 +++ b/lib/kstrtox.c
4370 @@ -321,6 +321,70 @@ int kstrtos8(const char *s, unsigned int base, s8 *res)
4371 }
4372 EXPORT_SYMBOL(kstrtos8);
4373
4374 +/**
4375 + * kstrtobool - convert common user inputs into boolean values
4376 + * @s: input string
4377 + * @res: result
4378 + *
4379 + * This routine returns 0 iff the first character is one of 'Yy1Nn0', or
4380 + * [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL. Value
4381 + * pointed to by res is updated upon finding a match.
4382 + */
4383 +int kstrtobool(const char *s, bool *res)
4384 +{
4385 + if (!s)
4386 + return -EINVAL;
4387 +
4388 + switch (s[0]) {
4389 + case 'y':
4390 + case 'Y':
4391 + case '1':
4392 + *res = true;
4393 + return 0;
4394 + case 'n':
4395 + case 'N':
4396 + case '0':
4397 + *res = false;
4398 + return 0;
4399 + case 'o':
4400 + case 'O':
4401 + switch (s[1]) {
4402 + case 'n':
4403 + case 'N':
4404 + *res = true;
4405 + return 0;
4406 + case 'f':
4407 + case 'F':
4408 + *res = false;
4409 + return 0;
4410 + default:
4411 + break;
4412 + }
4413 + default:
4414 + break;
4415 + }
4416 +
4417 + return -EINVAL;
4418 +}
4419 +EXPORT_SYMBOL(kstrtobool);
4420 +
4421 +/*
4422 + * Since "base" would be a nonsense argument, this open-codes the
4423 + * _from_user helper instead of using the helper macro below.
4424 + */
4425 +int kstrtobool_from_user(const char __user *s, size_t count, bool *res)
4426 +{
4427 + /* Longest string needed to differentiate, newline, terminator */
4428 + char buf[4];
4429 +
4430 + count = min(count, sizeof(buf) - 1);
4431 + if (copy_from_user(buf, s, count))
4432 + return -EFAULT;
4433 + buf[count] = '\0';
4434 + return kstrtobool(buf, res);
4435 +}
4436 +EXPORT_SYMBOL(kstrtobool_from_user);
4437 +
4438 #define kstrto_from_user(f, g, type) \
4439 int f(const char __user *s, size_t count, unsigned int base, type *res) \
4440 { \
4441 diff --git a/lib/string.c b/lib/string.c
4442 index 0323c0d5629a..1a90db9bc6e1 100644
4443 --- a/lib/string.c
4444 +++ b/lib/string.c
4445 @@ -630,35 +630,6 @@ bool sysfs_streq(const char *s1, const char *s2)
4446 }
4447 EXPORT_SYMBOL(sysfs_streq);
4448
4449 -/**
4450 - * strtobool - convert common user inputs into boolean values
4451 - * @s: input string
4452 - * @res: result
4453 - *
4454 - * This routine returns 0 iff the first character is one of 'Yy1Nn0'.
4455 - * Otherwise it will return -EINVAL. Value pointed to by res is
4456 - * updated upon finding a match.
4457 - */
4458 -int strtobool(const char *s, bool *res)
4459 -{
4460 - switch (s[0]) {
4461 - case 'y':
4462 - case 'Y':
4463 - case '1':
4464 - *res = true;
4465 - break;
4466 - case 'n':
4467 - case 'N':
4468 - case '0':
4469 - *res = false;
4470 - break;
4471 - default:
4472 - return -EINVAL;
4473 - }
4474 - return 0;
4475 -}
4476 -EXPORT_SYMBOL(strtobool);
4477 -
4478 #ifndef __HAVE_ARCH_MEMSET
4479 /**
4480 * memset - Fill a region of memory with the given value
4481 diff --git a/mm/filemap.c b/mm/filemap.c
4482 index 1bb007624b53..c588d1222b2a 100644
4483 --- a/mm/filemap.c
4484 +++ b/mm/filemap.c
4485 @@ -109,6 +109,48 @@
4486 * ->tasklist_lock (memory_failure, collect_procs_ao)
4487 */
4488
4489 +static int page_cache_tree_insert(struct address_space *mapping,
4490 + struct page *page, void **shadowp)
4491 +{
4492 + struct radix_tree_node *node;
4493 + void **slot;
4494 + int error;
4495 +
4496 + error = __radix_tree_create(&mapping->page_tree, page->index,
4497 + &node, &slot);
4498 + if (error)
4499 + return error;
4500 + if (*slot) {
4501 + void *p;
4502 +
4503 + p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
4504 + if (!radix_tree_exceptional_entry(p))
4505 + return -EEXIST;
4506 + if (shadowp)
4507 + *shadowp = p;
4508 + mapping->nrshadows--;
4509 + if (node)
4510 + workingset_node_shadows_dec(node);
4511 + }
4512 + radix_tree_replace_slot(slot, page);
4513 + mapping->nrpages++;
4514 + if (node) {
4515 + workingset_node_pages_inc(node);
4516 + /*
4517 + * Don't track node that contains actual pages.
4518 + *
4519 + * Avoid acquiring the list_lru lock if already
4520 + * untracked. The list_empty() test is safe as
4521 + * node->private_list is protected by
4522 + * mapping->tree_lock.
4523 + */
4524 + if (!list_empty(&node->private_list))
4525 + list_lru_del(&workingset_shadow_nodes,
4526 + &node->private_list);
4527 + }
4528 + return 0;
4529 +}
4530 +
4531 static void page_cache_tree_delete(struct address_space *mapping,
4532 struct page *page, void *shadow)
4533 {
4534 @@ -122,6 +164,14 @@ static void page_cache_tree_delete(struct address_space *mapping,
4535
4536 __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);
4537
4538 + if (!node) {
4539 + /*
4540 + * We need a node to properly account shadow
4541 + * entries. Don't plant any without. XXX
4542 + */
4543 + shadow = NULL;
4544 + }
4545 +
4546 if (shadow) {
4547 mapping->nrshadows++;
4548 /*
4549 @@ -538,9 +588,8 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
4550 memcg = mem_cgroup_begin_page_stat(old);
4551 spin_lock_irqsave(&mapping->tree_lock, flags);
4552 __delete_from_page_cache(old, NULL, memcg);
4553 - error = radix_tree_insert(&mapping->page_tree, offset, new);
4554 + error = page_cache_tree_insert(mapping, new, NULL);
4555 BUG_ON(error);
4556 - mapping->nrpages++;
4557
4558 /*
4559 * hugetlb pages do not participate in page cache accounting.
4560 @@ -562,48 +611,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
4561 }
4562 EXPORT_SYMBOL_GPL(replace_page_cache_page);
4563
4564 -static int page_cache_tree_insert(struct address_space *mapping,
4565 - struct page *page, void **shadowp)
4566 -{
4567 - struct radix_tree_node *node;
4568 - void **slot;
4569 - int error;
4570 -
4571 - error = __radix_tree_create(&mapping->page_tree, page->index,
4572 - &node, &slot);
4573 - if (error)
4574 - return error;
4575 - if (*slot) {
4576 - void *p;
4577 -
4578 - p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
4579 - if (!radix_tree_exceptional_entry(p))
4580 - return -EEXIST;
4581 - if (shadowp)
4582 - *shadowp = p;
4583 - mapping->nrshadows--;
4584 - if (node)
4585 - workingset_node_shadows_dec(node);
4586 - }
4587 - radix_tree_replace_slot(slot, page);
4588 - mapping->nrpages++;
4589 - if (node) {
4590 - workingset_node_pages_inc(node);
4591 - /*
4592 - * Don't track node that contains actual pages.
4593 - *
4594 - * Avoid acquiring the list_lru lock if already
4595 - * untracked. The list_empty() test is safe as
4596 - * node->private_list is protected by
4597 - * mapping->tree_lock.
4598 - */
4599 - if (!list_empty(&node->private_list))
4600 - list_lru_del(&workingset_shadow_nodes,
4601 - &node->private_list);
4602 - }
4603 - return 0;
4604 -}
4605 -
4606 static int __add_to_page_cache_locked(struct page *page,
4607 struct address_space *mapping,
4608 pgoff_t offset, gfp_t gfp_mask,
4609 diff --git a/mm/workingset.c b/mm/workingset.c
4610 index aa017133744b..df66f426fdcf 100644
4611 --- a/mm/workingset.c
4612 +++ b/mm/workingset.c
4613 @@ -341,21 +341,19 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
4614 * no pages, so we expect to be able to remove them all and
4615 * delete and free the empty node afterwards.
4616 */
4617 -
4618 - BUG_ON(!node->count);
4619 - BUG_ON(node->count & RADIX_TREE_COUNT_MASK);
4620 + BUG_ON(!workingset_node_shadows(node));
4621 + BUG_ON(workingset_node_pages(node));
4622
4623 for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
4624 if (node->slots[i]) {
4625 BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));
4626 node->slots[i] = NULL;
4627 - BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT));
4628 - node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
4629 + workingset_node_shadows_dec(node);
4630 BUG_ON(!mapping->nrshadows);
4631 mapping->nrshadows--;
4632 }
4633 }
4634 - BUG_ON(node->count);
4635 + BUG_ON(workingset_node_shadows(node));
4636 inc_zone_state(page_zone(virt_to_page(node)), WORKINGSET_NODERECLAIM);
4637 if (!__radix_tree_delete_node(&mapping->page_tree, node))
4638 BUG();
4639 diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
4640 index 1ba417207465..27b6f55fa43a 100644
4641 --- a/net/sunrpc/xprtsock.c
4642 +++ b/net/sunrpc/xprtsock.c
4643 @@ -474,7 +474,16 @@ static int xs_nospace(struct rpc_task *task)
4644 spin_unlock_bh(&xprt->transport_lock);
4645
4646 /* Race breaker in case memory is freed before above code is called */
4647 - sk->sk_write_space(sk);
4648 + if (ret == -EAGAIN) {
4649 + struct socket_wq *wq;
4650 +
4651 + rcu_read_lock();
4652 + wq = rcu_dereference(sk->sk_wq);
4653 + set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
4654 + rcu_read_unlock();
4655 +
4656 + sk->sk_write_space(sk);
4657 + }
4658 return ret;
4659 }
4660
4661 diff --git a/sound/pci/hda/dell_wmi_helper.c b/sound/pci/hda/dell_wmi_helper.c
4662 index 9c22f95838ef..19d41da79f93 100644
4663 --- a/sound/pci/hda/dell_wmi_helper.c
4664 +++ b/sound/pci/hda/dell_wmi_helper.c
4665 @@ -49,7 +49,7 @@ static void alc_fixup_dell_wmi(struct hda_codec *codec,
4666 removefunc = true;
4667 if (dell_led_set_func(DELL_LED_MICMUTE, false) >= 0) {
4668 dell_led_value = 0;
4669 - if (spec->gen.num_adc_nids > 1)
4670 + if (spec->gen.num_adc_nids > 1 && !spec->gen.dyn_adc_switch)
4671 codec_dbg(codec, "Skipping micmute LED control due to several ADCs");
4672 else {
4673 dell_old_cap_hook = spec->gen.cap_sync_hook;
4674 diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
4675 index 0a4ad5feb82e..12826ac0381f 100644
4676 --- a/sound/pci/hda/thinkpad_helper.c
4677 +++ b/sound/pci/hda/thinkpad_helper.c
4678 @@ -75,7 +75,7 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
4679 removefunc = false;
4680 }
4681 if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
4682 - if (spec->num_adc_nids > 1)
4683 + if (spec->num_adc_nids > 1 && !spec->dyn_adc_switch)
4684 codec_dbg(codec,
4685 "Skipping micmute LED control due to several ADCs");
4686 else {
4687 diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4688 index 9409d014b46c..71df7acf8643 100644
4689 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4690 +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4691 @@ -89,6 +89,7 @@ struct intel_pt_decoder {
4692 bool pge;
4693 bool have_tma;
4694 bool have_cyc;
4695 + bool fixup_last_mtc;
4696 uint64_t pos;
4697 uint64_t last_ip;
4698 uint64_t ip;
4699 @@ -584,10 +585,31 @@ struct intel_pt_calc_cyc_to_tsc_info {
4700 uint64_t tsc_timestamp;
4701 uint64_t timestamp;
4702 bool have_tma;
4703 + bool fixup_last_mtc;
4704 bool from_mtc;
4705 double cbr_cyc_to_tsc;
4706 };
4707
4708 +/*
4709 + * MTC provides a 8-bit slice of CTC but the TMA packet only provides the lower
4710 + * 16 bits of CTC. If mtc_shift > 8 then some of the MTC bits are not in the CTC
4711 + * provided by the TMA packet. Fix-up the last_mtc calculated from the TMA
4712 + * packet by copying the missing bits from the current MTC assuming the least
4713 + * difference between the two, and that the current MTC comes after last_mtc.
4714 + */
4715 +static void intel_pt_fixup_last_mtc(uint32_t mtc, int mtc_shift,
4716 + uint32_t *last_mtc)
4717 +{
4718 + uint32_t first_missing_bit = 1U << (16 - mtc_shift);
4719 + uint32_t mask = ~(first_missing_bit - 1);
4720 +
4721 + *last_mtc |= mtc & mask;
4722 + if (*last_mtc >= mtc) {
4723 + *last_mtc -= first_missing_bit;
4724 + *last_mtc &= 0xff;
4725 + }
4726 +}
4727 +
4728 static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
4729 {
4730 struct intel_pt_decoder *decoder = pkt_info->decoder;
4731 @@ -617,6 +639,11 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
4732 return 0;
4733
4734 mtc = pkt_info->packet.payload;
4735 + if (decoder->mtc_shift > 8 && data->fixup_last_mtc) {
4736 + data->fixup_last_mtc = false;
4737 + intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift,
4738 + &data->last_mtc);
4739 + }
4740 if (mtc > data->last_mtc)
4741 mtc_delta = mtc - data->last_mtc;
4742 else
4743 @@ -685,6 +712,7 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
4744
4745 data->ctc_delta = 0;
4746 data->have_tma = true;
4747 + data->fixup_last_mtc = true;
4748
4749 return 0;
4750
4751 @@ -751,6 +779,7 @@ static void intel_pt_calc_cyc_to_tsc(struct intel_pt_decoder *decoder,
4752 .tsc_timestamp = decoder->tsc_timestamp,
4753 .timestamp = decoder->timestamp,
4754 .have_tma = decoder->have_tma,
4755 + .fixup_last_mtc = decoder->fixup_last_mtc,
4756 .from_mtc = from_mtc,
4757 .cbr_cyc_to_tsc = 0,
4758 };
4759 @@ -1241,6 +1270,7 @@ static void intel_pt_calc_tma(struct intel_pt_decoder *decoder)
4760 }
4761 decoder->ctc_delta = 0;
4762 decoder->have_tma = true;
4763 + decoder->fixup_last_mtc = true;
4764 intel_pt_log("CTC timestamp " x64_fmt " last MTC %#x CTC rem %#x\n",
4765 decoder->ctc_timestamp, decoder->last_mtc, ctc_rem);
4766 }
4767 @@ -1255,6 +1285,12 @@ static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder)
4768
4769 mtc = decoder->packet.payload;
4770
4771 + if (decoder->mtc_shift > 8 && decoder->fixup_last_mtc) {
4772 + decoder->fixup_last_mtc = false;
4773 + intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift,
4774 + &decoder->last_mtc);
4775 + }
4776 +
4777 if (mtc > decoder->last_mtc)
4778 mtc_delta = mtc - decoder->last_mtc;
4779 else
4780 @@ -1323,6 +1359,8 @@ static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder)
4781 timestamp, decoder->timestamp);
4782 else
4783 decoder->timestamp = timestamp;
4784 +
4785 + decoder->timestamp_insn_cnt = 0;
4786 }
4787
4788 /* Walk PSB+ packets when already in sync. */
4789 diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
4790 index 9227c2f076c3..89927b5beebf 100644
4791 --- a/tools/perf/util/intel-pt.c
4792 +++ b/tools/perf/util/intel-pt.c
4793 @@ -238,7 +238,7 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
4794 }
4795
4796 queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
4797 -
4798 +next:
4799 buffer = auxtrace_buffer__next(queue, buffer);
4800 if (!buffer) {
4801 if (old_buffer)
4802 @@ -261,9 +261,6 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
4803 intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
4804 return -ENOMEM;
4805
4806 - if (old_buffer)
4807 - auxtrace_buffer__drop_data(old_buffer);
4808 -
4809 if (buffer->use_data) {
4810 b->len = buffer->use_size;
4811 b->buf = buffer->use_data;
4812 @@ -273,6 +270,16 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
4813 }
4814 b->ref_timestamp = buffer->reference;
4815
4816 + /*
4817 + * If in snapshot mode and the buffer has no usable data, get next
4818 + * buffer and again check overlap against old_buffer.
4819 + */
4820 + if (ptq->pt->snapshot_mode && !b->len)
4821 + goto next;
4822 +
4823 + if (old_buffer)
4824 + auxtrace_buffer__drop_data(old_buffer);
4825 +
4826 if (!old_buffer || ptq->pt->sampling_mode || (ptq->pt->snapshot_mode &&
4827 !buffer->consecutive)) {
4828 b->consecutive = false;