Contents of /trunk/kernel-magellan/patches-4.8/0104-4.8.5-all-fixes.patch
Parent Directory | Revision Log
Revision 2844 -
(show annotations)
(download)
Tue Nov 22 13:19:31 2016 UTC (7 years, 10 months ago) by niro
File size: 184911 byte(s)
-linux-4.8.5
1 | diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl |
2 | index 4ba0a2a61926..640f65e79ef1 100644 |
3 | --- a/Documentation/ABI/testing/sysfs-class-cxl |
4 | +++ b/Documentation/ABI/testing/sysfs-class-cxl |
5 | @@ -220,8 +220,11 @@ What: /sys/class/cxl/<card>/reset |
6 | Date: October 2014 |
7 | Contact: linuxppc-dev@lists.ozlabs.org |
8 | Description: write only |
9 | - Writing 1 will issue a PERST to card which may cause the card |
10 | - to reload the FPGA depending on load_image_on_perst. |
11 | + Writing 1 will issue a PERST to card provided there are no |
12 | + contexts active on any one of the card AFUs. This may cause |
13 | + the card to reload the FPGA depending on load_image_on_perst. |
14 | + Writing -1 will do a force PERST irrespective of any active |
15 | + contexts on the card AFUs. |
16 | Users: https://github.com/ibm-capi/libcxl |
17 | |
18 | What: /sys/class/cxl/<card>/perst_reloads_same_image (not in a guest) |
19 | diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt |
20 | index a4f4d693e2c1..46726d4899fe 100644 |
21 | --- a/Documentation/kernel-parameters.txt |
22 | +++ b/Documentation/kernel-parameters.txt |
23 | @@ -1457,7 +1457,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted. |
24 | i8042.nopnp [HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX |
25 | controllers |
26 | i8042.notimeout [HW] Ignore timeout condition signalled by controller |
27 | - i8042.reset [HW] Reset the controller during init and cleanup |
28 | + i8042.reset [HW] Reset the controller during init, cleanup and |
29 | + suspend-to-ram transitions, only during s2r |
30 | + transitions, or never reset |
31 | + Format: { 1 | Y | y | 0 | N | n } |
32 | + 1, Y, y: always reset controller |
33 | + 0, N, n: don't ever reset controller |
34 | + Default: only on s2r transitions on x86; most other |
35 | + architectures force reset to be always executed |
36 | i8042.unlock [HW] Unlock (ignore) the keylock |
37 | i8042.kbdreset [HW] Reset device connected to KBD port |
38 | |
39 | diff --git a/Makefile b/Makefile |
40 | index 82a36ab540a4..daa3a01d2525 100644 |
41 | --- a/Makefile |
42 | +++ b/Makefile |
43 | @@ -1,6 +1,6 @@ |
44 | VERSION = 4 |
45 | PATCHLEVEL = 8 |
46 | -SUBLEVEL = 4 |
47 | +SUBLEVEL = 5 |
48 | EXTRAVERSION = |
49 | NAME = Psychotic Stoned Sheep |
50 | |
51 | diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c |
52 | index 6cb3736b6b83..d347bbc086fe 100644 |
53 | --- a/arch/arc/kernel/signal.c |
54 | +++ b/arch/arc/kernel/signal.c |
55 | @@ -107,13 +107,13 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) |
56 | struct user_regs_struct uregs; |
57 | |
58 | err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); |
59 | - if (!err) |
60 | - set_current_blocked(&set); |
61 | - |
62 | err |= __copy_from_user(&uregs.scratch, |
63 | &(sf->uc.uc_mcontext.regs.scratch), |
64 | sizeof(sf->uc.uc_mcontext.regs.scratch)); |
65 | + if (err) |
66 | + return err; |
67 | |
68 | + set_current_blocked(&set); |
69 | regs->bta = uregs.scratch.bta; |
70 | regs->lp_start = uregs.scratch.lp_start; |
71 | regs->lp_end = uregs.scratch.lp_end; |
72 | @@ -138,7 +138,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) |
73 | regs->r0 = uregs.scratch.r0; |
74 | regs->sp = uregs.scratch.sp; |
75 | |
76 | - return err; |
77 | + return 0; |
78 | } |
79 | |
80 | static inline int is_do_ss_needed(unsigned int magic) |
81 | diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h |
82 | index 4cdeae3b17c6..948a9a8a9297 100644 |
83 | --- a/arch/arm64/include/asm/kvm_emulate.h |
84 | +++ b/arch/arm64/include/asm/kvm_emulate.h |
85 | @@ -167,11 +167,6 @@ static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu) |
86 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV); |
87 | } |
88 | |
89 | -static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) |
90 | -{ |
91 | - return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR); |
92 | -} |
93 | - |
94 | static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu) |
95 | { |
96 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE); |
97 | @@ -192,6 +187,12 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) |
98 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW); |
99 | } |
100 | |
101 | +static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) |
102 | +{ |
103 | + return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) || |
104 | + kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */ |
105 | +} |
106 | + |
107 | static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu) |
108 | { |
109 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM); |
110 | diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h |
111 | index e12af6754634..06ff7fd9e81f 100644 |
112 | --- a/arch/arm64/include/asm/module.h |
113 | +++ b/arch/arm64/include/asm/module.h |
114 | @@ -17,6 +17,7 @@ |
115 | #define __ASM_MODULE_H |
116 | |
117 | #include <asm-generic/module.h> |
118 | +#include <asm/memory.h> |
119 | |
120 | #define MODULE_ARCH_VERMAGIC "aarch64" |
121 | |
122 | @@ -32,6 +33,10 @@ u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela, |
123 | Elf64_Sym *sym); |
124 | |
125 | #ifdef CONFIG_RANDOMIZE_BASE |
126 | +#ifdef CONFIG_MODVERSIONS |
127 | +#define ARCH_RELOCATES_KCRCTAB |
128 | +#define reloc_start (kimage_vaddr - KIMAGE_VADDR) |
129 | +#endif |
130 | extern u64 module_alloc_base; |
131 | #else |
132 | #define module_alloc_base ((u64)_etext - MODULES_VSIZE) |
133 | diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h |
134 | index 2fee2f59288c..5394c8405e66 100644 |
135 | --- a/arch/arm64/include/asm/percpu.h |
136 | +++ b/arch/arm64/include/asm/percpu.h |
137 | @@ -44,48 +44,44 @@ static inline unsigned long __percpu_##op(void *ptr, \ |
138 | \ |
139 | switch (size) { \ |
140 | case 1: \ |
141 | - do { \ |
142 | - asm ("//__per_cpu_" #op "_1\n" \ |
143 | - "ldxrb %w[ret], %[ptr]\n" \ |
144 | + asm ("//__per_cpu_" #op "_1\n" \ |
145 | + "1: ldxrb %w[ret], %[ptr]\n" \ |
146 | #asm_op " %w[ret], %w[ret], %w[val]\n" \ |
147 | - "stxrb %w[loop], %w[ret], %[ptr]\n" \ |
148 | - : [loop] "=&r" (loop), [ret] "=&r" (ret), \ |
149 | - [ptr] "+Q"(*(u8 *)ptr) \ |
150 | - : [val] "Ir" (val)); \ |
151 | - } while (loop); \ |
152 | + " stxrb %w[loop], %w[ret], %[ptr]\n" \ |
153 | + " cbnz %w[loop], 1b" \ |
154 | + : [loop] "=&r" (loop), [ret] "=&r" (ret), \ |
155 | + [ptr] "+Q"(*(u8 *)ptr) \ |
156 | + : [val] "Ir" (val)); \ |
157 | break; \ |
158 | case 2: \ |
159 | - do { \ |
160 | - asm ("//__per_cpu_" #op "_2\n" \ |
161 | - "ldxrh %w[ret], %[ptr]\n" \ |
162 | + asm ("//__per_cpu_" #op "_2\n" \ |
163 | + "1: ldxrh %w[ret], %[ptr]\n" \ |
164 | #asm_op " %w[ret], %w[ret], %w[val]\n" \ |
165 | - "stxrh %w[loop], %w[ret], %[ptr]\n" \ |
166 | - : [loop] "=&r" (loop), [ret] "=&r" (ret), \ |
167 | - [ptr] "+Q"(*(u16 *)ptr) \ |
168 | - : [val] "Ir" (val)); \ |
169 | - } while (loop); \ |
170 | + " stxrh %w[loop], %w[ret], %[ptr]\n" \ |
171 | + " cbnz %w[loop], 1b" \ |
172 | + : [loop] "=&r" (loop), [ret] "=&r" (ret), \ |
173 | + [ptr] "+Q"(*(u16 *)ptr) \ |
174 | + : [val] "Ir" (val)); \ |
175 | break; \ |
176 | case 4: \ |
177 | - do { \ |
178 | - asm ("//__per_cpu_" #op "_4\n" \ |
179 | - "ldxr %w[ret], %[ptr]\n" \ |
180 | + asm ("//__per_cpu_" #op "_4\n" \ |
181 | + "1: ldxr %w[ret], %[ptr]\n" \ |
182 | #asm_op " %w[ret], %w[ret], %w[val]\n" \ |
183 | - "stxr %w[loop], %w[ret], %[ptr]\n" \ |
184 | - : [loop] "=&r" (loop), [ret] "=&r" (ret), \ |
185 | - [ptr] "+Q"(*(u32 *)ptr) \ |
186 | - : [val] "Ir" (val)); \ |
187 | - } while (loop); \ |
188 | + " stxr %w[loop], %w[ret], %[ptr]\n" \ |
189 | + " cbnz %w[loop], 1b" \ |
190 | + : [loop] "=&r" (loop), [ret] "=&r" (ret), \ |
191 | + [ptr] "+Q"(*(u32 *)ptr) \ |
192 | + : [val] "Ir" (val)); \ |
193 | break; \ |
194 | case 8: \ |
195 | - do { \ |
196 | - asm ("//__per_cpu_" #op "_8\n" \ |
197 | - "ldxr %[ret], %[ptr]\n" \ |
198 | + asm ("//__per_cpu_" #op "_8\n" \ |
199 | + "1: ldxr %[ret], %[ptr]\n" \ |
200 | #asm_op " %[ret], %[ret], %[val]\n" \ |
201 | - "stxr %w[loop], %[ret], %[ptr]\n" \ |
202 | - : [loop] "=&r" (loop), [ret] "=&r" (ret), \ |
203 | - [ptr] "+Q"(*(u64 *)ptr) \ |
204 | - : [val] "Ir" (val)); \ |
205 | - } while (loop); \ |
206 | + " stxr %w[loop], %[ret], %[ptr]\n" \ |
207 | + " cbnz %w[loop], 1b" \ |
208 | + : [loop] "=&r" (loop), [ret] "=&r" (ret), \ |
209 | + [ptr] "+Q"(*(u64 *)ptr) \ |
210 | + : [val] "Ir" (val)); \ |
211 | break; \ |
212 | default: \ |
213 | BUILD_BUG(); \ |
214 | @@ -150,44 +146,40 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, |
215 | |
216 | switch (size) { |
217 | case 1: |
218 | - do { |
219 | - asm ("//__percpu_xchg_1\n" |
220 | - "ldxrb %w[ret], %[ptr]\n" |
221 | - "stxrb %w[loop], %w[val], %[ptr]\n" |
222 | - : [loop] "=&r"(loop), [ret] "=&r"(ret), |
223 | - [ptr] "+Q"(*(u8 *)ptr) |
224 | - : [val] "r" (val)); |
225 | - } while (loop); |
226 | + asm ("//__percpu_xchg_1\n" |
227 | + "1: ldxrb %w[ret], %[ptr]\n" |
228 | + " stxrb %w[loop], %w[val], %[ptr]\n" |
229 | + " cbnz %w[loop], 1b" |
230 | + : [loop] "=&r"(loop), [ret] "=&r"(ret), |
231 | + [ptr] "+Q"(*(u8 *)ptr) |
232 | + : [val] "r" (val)); |
233 | break; |
234 | case 2: |
235 | - do { |
236 | - asm ("//__percpu_xchg_2\n" |
237 | - "ldxrh %w[ret], %[ptr]\n" |
238 | - "stxrh %w[loop], %w[val], %[ptr]\n" |
239 | - : [loop] "=&r"(loop), [ret] "=&r"(ret), |
240 | - [ptr] "+Q"(*(u16 *)ptr) |
241 | - : [val] "r" (val)); |
242 | - } while (loop); |
243 | + asm ("//__percpu_xchg_2\n" |
244 | + "1: ldxrh %w[ret], %[ptr]\n" |
245 | + " stxrh %w[loop], %w[val], %[ptr]\n" |
246 | + " cbnz %w[loop], 1b" |
247 | + : [loop] "=&r"(loop), [ret] "=&r"(ret), |
248 | + [ptr] "+Q"(*(u16 *)ptr) |
249 | + : [val] "r" (val)); |
250 | break; |
251 | case 4: |
252 | - do { |
253 | - asm ("//__percpu_xchg_4\n" |
254 | - "ldxr %w[ret], %[ptr]\n" |
255 | - "stxr %w[loop], %w[val], %[ptr]\n" |
256 | - : [loop] "=&r"(loop), [ret] "=&r"(ret), |
257 | - [ptr] "+Q"(*(u32 *)ptr) |
258 | - : [val] "r" (val)); |
259 | - } while (loop); |
260 | + asm ("//__percpu_xchg_4\n" |
261 | + "1: ldxr %w[ret], %[ptr]\n" |
262 | + " stxr %w[loop], %w[val], %[ptr]\n" |
263 | + " cbnz %w[loop], 1b" |
264 | + : [loop] "=&r"(loop), [ret] "=&r"(ret), |
265 | + [ptr] "+Q"(*(u32 *)ptr) |
266 | + : [val] "r" (val)); |
267 | break; |
268 | case 8: |
269 | - do { |
270 | - asm ("//__percpu_xchg_8\n" |
271 | - "ldxr %[ret], %[ptr]\n" |
272 | - "stxr %w[loop], %[val], %[ptr]\n" |
273 | - : [loop] "=&r"(loop), [ret] "=&r"(ret), |
274 | - [ptr] "+Q"(*(u64 *)ptr) |
275 | - : [val] "r" (val)); |
276 | - } while (loop); |
277 | + asm ("//__percpu_xchg_8\n" |
278 | + "1: ldxr %[ret], %[ptr]\n" |
279 | + " stxr %w[loop], %[val], %[ptr]\n" |
280 | + " cbnz %w[loop], 1b" |
281 | + : [loop] "=&r"(loop), [ret] "=&r"(ret), |
282 | + [ptr] "+Q"(*(u64 *)ptr) |
283 | + : [val] "r" (val)); |
284 | break; |
285 | default: |
286 | BUILD_BUG(); |
287 | diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h |
288 | index c47257c91b77..db849839e07b 100644 |
289 | --- a/arch/arm64/include/asm/uaccess.h |
290 | +++ b/arch/arm64/include/asm/uaccess.h |
291 | @@ -21,6 +21,7 @@ |
292 | /* |
293 | * User space memory access functions |
294 | */ |
295 | +#include <linux/bitops.h> |
296 | #include <linux/kasan-checks.h> |
297 | #include <linux/string.h> |
298 | #include <linux/thread_info.h> |
299 | @@ -102,6 +103,13 @@ static inline void set_fs(mm_segment_t fs) |
300 | flag; \ |
301 | }) |
302 | |
303 | +/* |
304 | + * When dealing with data aborts or instruction traps we may end up with |
305 | + * a tagged userland pointer. Clear the tag to get a sane pointer to pass |
306 | + * on to access_ok(), for instance. |
307 | + */ |
308 | +#define untagged_addr(addr) sign_extend64(addr, 55) |
309 | + |
310 | #define access_ok(type, addr, size) __range_ok(addr, size) |
311 | #define user_addr_max get_fs |
312 | |
313 | diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c |
314 | index 42ffdb54e162..b0988bb1bf64 100644 |
315 | --- a/arch/arm64/kernel/armv8_deprecated.c |
316 | +++ b/arch/arm64/kernel/armv8_deprecated.c |
317 | @@ -280,35 +280,43 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table) |
318 | /* |
319 | * Error-checking SWP macros implemented using ldxr{b}/stxr{b} |
320 | */ |
321 | -#define __user_swpX_asm(data, addr, res, temp, B) \ |
322 | + |
323 | +/* Arbitrary constant to ensure forward-progress of the LL/SC loop */ |
324 | +#define __SWP_LL_SC_LOOPS 4 |
325 | + |
326 | +#define __user_swpX_asm(data, addr, res, temp, temp2, B) \ |
327 | __asm__ __volatile__( \ |
328 | + " mov %w3, %w7\n" \ |
329 | ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \ |
330 | CONFIG_ARM64_PAN) \ |
331 | - "0: ldxr"B" %w2, [%3]\n" \ |
332 | - "1: stxr"B" %w0, %w1, [%3]\n" \ |
333 | + "0: ldxr"B" %w2, [%4]\n" \ |
334 | + "1: stxr"B" %w0, %w1, [%4]\n" \ |
335 | " cbz %w0, 2f\n" \ |
336 | - " mov %w0, %w4\n" \ |
337 | + " sub %w3, %w3, #1\n" \ |
338 | + " cbnz %w3, 0b\n" \ |
339 | + " mov %w0, %w5\n" \ |
340 | " b 3f\n" \ |
341 | "2:\n" \ |
342 | " mov %w1, %w2\n" \ |
343 | "3:\n" \ |
344 | " .pushsection .fixup,\"ax\"\n" \ |
345 | " .align 2\n" \ |
346 | - "4: mov %w0, %w5\n" \ |
347 | + "4: mov %w0, %w6\n" \ |
348 | " b 3b\n" \ |
349 | " .popsection" \ |
350 | _ASM_EXTABLE(0b, 4b) \ |
351 | _ASM_EXTABLE(1b, 4b) \ |
352 | ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \ |
353 | CONFIG_ARM64_PAN) \ |
354 | - : "=&r" (res), "+r" (data), "=&r" (temp) \ |
355 | - : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \ |
356 | + : "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \ |
357 | + : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT), \ |
358 | + "i" (__SWP_LL_SC_LOOPS) \ |
359 | : "memory") |
360 | |
361 | -#define __user_swp_asm(data, addr, res, temp) \ |
362 | - __user_swpX_asm(data, addr, res, temp, "") |
363 | -#define __user_swpb_asm(data, addr, res, temp) \ |
364 | - __user_swpX_asm(data, addr, res, temp, "b") |
365 | +#define __user_swp_asm(data, addr, res, temp, temp2) \ |
366 | + __user_swpX_asm(data, addr, res, temp, temp2, "") |
367 | +#define __user_swpb_asm(data, addr, res, temp, temp2) \ |
368 | + __user_swpX_asm(data, addr, res, temp, temp2, "b") |
369 | |
370 | /* |
371 | * Bit 22 of the instruction encoding distinguishes between |
372 | @@ -328,12 +336,12 @@ static int emulate_swpX(unsigned int address, unsigned int *data, |
373 | } |
374 | |
375 | while (1) { |
376 | - unsigned long temp; |
377 | + unsigned long temp, temp2; |
378 | |
379 | if (type == TYPE_SWPB) |
380 | - __user_swpb_asm(*data, address, res, temp); |
381 | + __user_swpb_asm(*data, address, res, temp, temp2); |
382 | else |
383 | - __user_swp_asm(*data, address, res, temp); |
384 | + __user_swp_asm(*data, address, res, temp, temp2); |
385 | |
386 | if (likely(res != -EAGAIN) || signal_pending(current)) |
387 | break; |
388 | diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S |
389 | index 3e7b050e99dc..4d19508c55a3 100644 |
390 | --- a/arch/arm64/kernel/head.S |
391 | +++ b/arch/arm64/kernel/head.S |
392 | @@ -578,8 +578,9 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems |
393 | b.lt 4f // Skip if no PMU present |
394 | mrs x0, pmcr_el0 // Disable debug access traps |
395 | ubfx x0, x0, #11, #5 // to EL2 and allow access to |
396 | - msr mdcr_el2, x0 // all PMU counters from EL1 |
397 | 4: |
398 | + csel x0, xzr, x0, lt // all PMU counters from EL1 |
399 | + msr mdcr_el2, x0 // (if they exist) |
400 | |
401 | /* Stage-2 translation */ |
402 | msr vttbr_el2, xzr |
403 | diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c |
404 | index df06750846de..771a01a7fbce 100644 |
405 | --- a/arch/arm64/kernel/traps.c |
406 | +++ b/arch/arm64/kernel/traps.c |
407 | @@ -434,18 +434,21 @@ void cpu_enable_cache_maint_trap(void *__unused) |
408 | } |
409 | |
410 | #define __user_cache_maint(insn, address, res) \ |
411 | - asm volatile ( \ |
412 | - "1: " insn ", %1\n" \ |
413 | - " mov %w0, #0\n" \ |
414 | - "2:\n" \ |
415 | - " .pushsection .fixup,\"ax\"\n" \ |
416 | - " .align 2\n" \ |
417 | - "3: mov %w0, %w2\n" \ |
418 | - " b 2b\n" \ |
419 | - " .popsection\n" \ |
420 | - _ASM_EXTABLE(1b, 3b) \ |
421 | - : "=r" (res) \ |
422 | - : "r" (address), "i" (-EFAULT) ) |
423 | + if (untagged_addr(address) >= user_addr_max()) \ |
424 | + res = -EFAULT; \ |
425 | + else \ |
426 | + asm volatile ( \ |
427 | + "1: " insn ", %1\n" \ |
428 | + " mov %w0, #0\n" \ |
429 | + "2:\n" \ |
430 | + " .pushsection .fixup,\"ax\"\n" \ |
431 | + " .align 2\n" \ |
432 | + "3: mov %w0, %w2\n" \ |
433 | + " b 2b\n" \ |
434 | + " .popsection\n" \ |
435 | + _ASM_EXTABLE(1b, 3b) \ |
436 | + : "=r" (res) \ |
437 | + : "r" (address), "i" (-EFAULT) ) |
438 | |
439 | asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs) |
440 | { |
441 | diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S |
442 | index ce9e5e5f28cf..eaf08d3abbef 100644 |
443 | --- a/arch/arm64/kvm/hyp/entry.S |
444 | +++ b/arch/arm64/kvm/hyp/entry.S |
445 | @@ -98,6 +98,8 @@ ENTRY(__guest_exit) |
446 | // x4-x29,lr: vcpu regs |
447 | // vcpu x0-x3 on the stack |
448 | |
449 | + ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN) |
450 | + |
451 | add x2, x0, #VCPU_CONTEXT |
452 | |
453 | stp x4, x5, [x2, #CPU_XREG_OFFSET(4)] |
454 | diff --git a/arch/metag/include/asm/atomic.h b/arch/metag/include/asm/atomic.h |
455 | index 470e365f04ea..8ff0a70865f6 100644 |
456 | --- a/arch/metag/include/asm/atomic.h |
457 | +++ b/arch/metag/include/asm/atomic.h |
458 | @@ -39,11 +39,10 @@ |
459 | #define atomic_dec(v) atomic_sub(1, (v)) |
460 | |
461 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) |
462 | +#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v) |
463 | |
464 | #endif |
465 | |
466 | -#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v) |
467 | - |
468 | #include <asm-generic/atomic64.h> |
469 | |
470 | #endif /* __ASM_METAG_ATOMIC_H */ |
471 | diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h |
472 | index f6fc6aac5496..b6578611dddb 100644 |
473 | --- a/arch/mips/include/asm/ptrace.h |
474 | +++ b/arch/mips/include/asm/ptrace.h |
475 | @@ -152,7 +152,7 @@ static inline int is_syscall_success(struct pt_regs *regs) |
476 | |
477 | static inline long regs_return_value(struct pt_regs *regs) |
478 | { |
479 | - if (is_syscall_success(regs)) |
480 | + if (is_syscall_success(regs) || !user_mode(regs)) |
481 | return regs->regs[2]; |
482 | else |
483 | return -regs->regs[2]; |
484 | diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile |
485 | index 3b4538ec0102..de9e8836d248 100644 |
486 | --- a/arch/mips/vdso/Makefile |
487 | +++ b/arch/mips/vdso/Makefile |
488 | @@ -82,7 +82,7 @@ obj-vdso := $(obj-vdso-y:%.o=$(obj)/%.o) |
489 | $(obj-vdso): KBUILD_CFLAGS := $(cflags-vdso) $(native-abi) |
490 | $(obj-vdso): KBUILD_AFLAGS := $(aflags-vdso) $(native-abi) |
491 | |
492 | -$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(native-abi) |
493 | +$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) $(native-abi) |
494 | |
495 | $(obj)/vdso.so.dbg.raw: $(obj)/vdso.lds $(obj-vdso) FORCE |
496 | $(call if_changed,vdsold) |
497 | diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h |
498 | index 291cee28ccb6..c2c43f714684 100644 |
499 | --- a/arch/parisc/include/asm/pgtable.h |
500 | +++ b/arch/parisc/include/asm/pgtable.h |
501 | @@ -83,10 +83,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) |
502 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e)) |
503 | |
504 | /* This is the size of the initially mapped kernel memory */ |
505 | -#ifdef CONFIG_64BIT |
506 | -#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */ |
507 | +#if defined(CONFIG_64BIT) |
508 | +#define KERNEL_INITIAL_ORDER 26 /* 1<<26 = 64MB */ |
509 | #else |
510 | -#define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */ |
511 | +#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */ |
512 | #endif |
513 | #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER) |
514 | |
515 | diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c |
516 | index f7ea626e29c9..81d6f6391944 100644 |
517 | --- a/arch/parisc/kernel/setup.c |
518 | +++ b/arch/parisc/kernel/setup.c |
519 | @@ -38,6 +38,7 @@ |
520 | #include <linux/export.h> |
521 | |
522 | #include <asm/processor.h> |
523 | +#include <asm/sections.h> |
524 | #include <asm/pdc.h> |
525 | #include <asm/led.h> |
526 | #include <asm/machdep.h> /* for pa7300lc_init() proto */ |
527 | @@ -140,6 +141,13 @@ void __init setup_arch(char **cmdline_p) |
528 | #endif |
529 | printk(KERN_CONT ".\n"); |
530 | |
531 | + /* |
532 | + * Check if initial kernel page mappings are sufficient. |
533 | + * panic early if not, else we may access kernel functions |
534 | + * and variables which can't be reached. |
535 | + */ |
536 | + if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE) |
537 | + panic("KERNEL_INITIAL_ORDER too small!"); |
538 | |
539 | pdc_console_init(); |
540 | |
541 | diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c |
542 | index 4b0b963d52a7..9b63b876a13a 100644 |
543 | --- a/arch/parisc/kernel/time.c |
544 | +++ b/arch/parisc/kernel/time.c |
545 | @@ -226,12 +226,6 @@ void __init start_cpu_itimer(void) |
546 | unsigned int cpu = smp_processor_id(); |
547 | unsigned long next_tick = mfctl(16) + clocktick; |
548 | |
549 | -#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT) |
550 | - /* With multiple 64bit CPUs online, the cr16's are not syncronized. */ |
551 | - if (cpu != 0) |
552 | - clear_sched_clock_stable(); |
553 | -#endif |
554 | - |
555 | mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */ |
556 | |
557 | per_cpu(cpu_data, cpu).it_value = next_tick; |
558 | diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S |
559 | index f3ead0b6ce46..75304af9f742 100644 |
560 | --- a/arch/parisc/kernel/vmlinux.lds.S |
561 | +++ b/arch/parisc/kernel/vmlinux.lds.S |
562 | @@ -89,8 +89,9 @@ SECTIONS |
563 | /* Start of data section */ |
564 | _sdata = .; |
565 | |
566 | - RO_DATA_SECTION(8) |
567 | - |
568 | + /* Architecturally we need to keep __gp below 0x1000000 and thus |
569 | + * in front of RO_DATA_SECTION() which stores lots of tracepoint |
570 | + * and ftrace symbols. */ |
571 | #ifdef CONFIG_64BIT |
572 | . = ALIGN(16); |
573 | /* Linkage tables */ |
574 | @@ -105,6 +106,8 @@ SECTIONS |
575 | } |
576 | #endif |
577 | |
578 | + RO_DATA_SECTION(8) |
579 | + |
580 | /* unwind info */ |
581 | .PARISC.unwind : { |
582 | __start___unwind = .; |
583 | diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig |
584 | index 927d2ab2ce08..792cb1768c8f 100644 |
585 | --- a/arch/powerpc/Kconfig |
586 | +++ b/arch/powerpc/Kconfig |
587 | @@ -637,7 +637,7 @@ config FORCE_MAX_ZONEORDER |
588 | int "Maximum zone order" |
589 | range 8 9 if PPC64 && PPC_64K_PAGES |
590 | default "9" if PPC64 && PPC_64K_PAGES |
591 | - range 9 13 if PPC64 && !PPC_64K_PAGES |
592 | + range 13 13 if PPC64 && !PPC_64K_PAGES |
593 | default "13" if PPC64 && !PPC_64K_PAGES |
594 | range 9 64 if PPC32 && PPC_16K_PAGES |
595 | default "9" if PPC32 && PPC_16K_PAGES |
596 | diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c |
597 | index 5f36e8a70daa..29aa8d1ce273 100644 |
598 | --- a/arch/powerpc/kernel/eeh_driver.c |
599 | +++ b/arch/powerpc/kernel/eeh_driver.c |
600 | @@ -994,6 +994,14 @@ static void eeh_handle_special_event(void) |
601 | /* Notify all devices to be down */ |
602 | eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); |
603 | bus = eeh_pe_bus_get(phb_pe); |
604 | + if (!bus) { |
605 | + pr_err("%s: Cannot find PCI bus for " |
606 | + "PHB#%d-PE#%x\n", |
607 | + __func__, |
608 | + pe->phb->global_number, |
609 | + pe->addr); |
610 | + break; |
611 | + } |
612 | eeh_pe_dev_traverse(pe, |
613 | eeh_report_failure, NULL); |
614 | pci_hp_remove_devices(bus); |
615 | diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S |
616 | index 184a6ba7f283..abf17feffe40 100644 |
617 | --- a/arch/powerpc/kernel/vdso64/datapage.S |
618 | +++ b/arch/powerpc/kernel/vdso64/datapage.S |
619 | @@ -59,7 +59,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map) |
620 | bl V_LOCAL_FUNC(__get_datapage) |
621 | mtlr r12 |
622 | addi r3,r3,CFG_SYSCALL_MAP64 |
623 | - cmpli cr0,r4,0 |
624 | + cmpldi cr0,r4,0 |
625 | crclr cr0*4+so |
626 | beqlr |
627 | li r0,NR_syscalls |
628 | diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S |
629 | index a76b4af37ef2..382021324883 100644 |
630 | --- a/arch/powerpc/kernel/vdso64/gettimeofday.S |
631 | +++ b/arch/powerpc/kernel/vdso64/gettimeofday.S |
632 | @@ -145,7 +145,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres) |
633 | bne cr0,99f |
634 | |
635 | li r3,0 |
636 | - cmpli cr0,r4,0 |
637 | + cmpldi cr0,r4,0 |
638 | crclr cr0*4+so |
639 | beqlr |
640 | lis r5,CLOCK_REALTIME_RES@h |
641 | diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S |
642 | index f09899e35991..7b22624f332c 100644 |
643 | --- a/arch/powerpc/lib/copyuser_64.S |
644 | +++ b/arch/powerpc/lib/copyuser_64.S |
645 | @@ -359,6 +359,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) |
646 | addi r3,r3,8 |
647 | 171: |
648 | 177: |
649 | +179: |
650 | addi r3,r3,8 |
651 | 370: |
652 | 372: |
653 | @@ -373,7 +374,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) |
654 | 173: |
655 | 174: |
656 | 175: |
657 | -179: |
658 | 181: |
659 | 184: |
660 | 186: |
661 | diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c |
662 | index bb0354222b11..362954f98029 100644 |
663 | --- a/arch/powerpc/mm/copro_fault.c |
664 | +++ b/arch/powerpc/mm/copro_fault.c |
665 | @@ -106,6 +106,8 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb) |
666 | switch (REGION_ID(ea)) { |
667 | case USER_REGION_ID: |
668 | pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea); |
669 | + if (mm == NULL) |
670 | + return 1; |
671 | psize = get_slice_psize(mm, ea); |
672 | ssize = user_segment_size(ea); |
673 | vsid = get_vsid(mm->context.id, ea, ssize); |
674 | diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c |
675 | index 0821556e16f4..28923b2e2df1 100644 |
676 | --- a/arch/powerpc/mm/hash_utils_64.c |
677 | +++ b/arch/powerpc/mm/hash_utils_64.c |
678 | @@ -526,7 +526,7 @@ static bool might_have_hea(void) |
679 | */ |
680 | #ifdef CONFIG_IBMEBUS |
681 | return !cpu_has_feature(CPU_FTR_ARCH_207S) && |
682 | - !firmware_has_feature(FW_FEATURE_SPLPAR); |
683 | + firmware_has_feature(FW_FEATURE_SPLPAR); |
684 | #else |
685 | return false; |
686 | #endif |
687 | diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c |
688 | index 86544ea85dc3..ba17fdd87ab0 100644 |
689 | --- a/arch/powerpc/platforms/powernv/eeh-powernv.c |
690 | +++ b/arch/powerpc/platforms/powernv/eeh-powernv.c |
691 | @@ -1091,6 +1091,11 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option) |
692 | } |
693 | |
694 | bus = eeh_pe_bus_get(pe); |
695 | + if (!bus) { |
696 | + pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n", |
697 | + __func__, pe->phb->global_number, pe->addr); |
698 | + return -EIO; |
699 | + } |
700 | if (pe->type & EEH_PE_VF) |
701 | return pnv_eeh_reset_vf_pe(pe, option); |
702 | |
703 | @@ -1306,7 +1311,7 @@ static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose) |
704 | return; |
705 | } |
706 | |
707 | - switch (data->type) { |
708 | + switch (be16_to_cpu(data->type)) { |
709 | case OPAL_P7IOC_DIAG_TYPE_RGC: |
710 | pr_info("P7IOC diag-data for RGC\n\n"); |
711 | pnv_eeh_dump_hub_diag_common(data); |
712 | @@ -1538,7 +1543,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe) |
713 | |
714 | /* Try best to clear it */ |
715 | opal_pci_eeh_freeze_clear(phb->opal_id, |
716 | - frozen_pe_no, |
717 | + be64_to_cpu(frozen_pe_no), |
718 | OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); |
719 | ret = EEH_NEXT_ERR_NONE; |
720 | } else if ((*pe)->state & EEH_PE_ISOLATED || |
721 | diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c |
722 | index a21d831c1114..0fe3520058a5 100644 |
723 | --- a/arch/powerpc/platforms/powernv/pci.c |
724 | +++ b/arch/powerpc/platforms/powernv/pci.c |
725 | @@ -309,8 +309,8 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose, |
726 | be64_to_cpu(data->dma1ErrorLog1)); |
727 | |
728 | for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) { |
729 | - if ((data->pestA[i] >> 63) == 0 && |
730 | - (data->pestB[i] >> 63) == 0) |
731 | + if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 && |
732 | + (be64_to_cpu(data->pestB[i]) >> 63) == 0) |
733 | continue; |
734 | |
735 | pr_info("PE[%3d] A/B: %016llx %016llx\n", |
736 | diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c |
737 | index 86707e67843f..aa35245d8d6d 100644 |
738 | --- a/arch/powerpc/platforms/pseries/lpar.c |
739 | +++ b/arch/powerpc/platforms/pseries/lpar.c |
740 | @@ -393,7 +393,7 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot, |
741 | unsigned long *vpn, int count, |
742 | int psize, int ssize) |
743 | { |
744 | - unsigned long param[8]; |
745 | + unsigned long param[PLPAR_HCALL9_BUFSIZE]; |
746 | int i = 0, pix = 0, rc; |
747 | unsigned long flags = 0; |
748 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); |
749 | @@ -522,7 +522,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local) |
750 | unsigned long flags = 0; |
751 | struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); |
752 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); |
753 | - unsigned long param[9]; |
754 | + unsigned long param[PLPAR_HCALL9_BUFSIZE]; |
755 | unsigned long hash, index, shift, hidx, slot; |
756 | real_pte_t pte; |
757 | int psize, ssize; |
758 | diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c |
759 | index 81d49476c47e..82e8e2b6a3c4 100644 |
760 | --- a/arch/powerpc/sysdev/cpm1.c |
761 | +++ b/arch/powerpc/sysdev/cpm1.c |
762 | @@ -233,8 +233,6 @@ void __init cpm_reset(void) |
763 | else |
764 | out_be32(&siu_conf->sc_sdcr, 1); |
765 | immr_unmap(siu_conf); |
766 | - |
767 | - cpm_muram_init(); |
768 | } |
769 | |
770 | static DEFINE_SPINLOCK(cmd_lock); |
771 | diff --git a/arch/powerpc/sysdev/cpm2.c b/arch/powerpc/sysdev/cpm2.c |
772 | index 8dc1e24f3c23..f78ff841652c 100644 |
773 | --- a/arch/powerpc/sysdev/cpm2.c |
774 | +++ b/arch/powerpc/sysdev/cpm2.c |
775 | @@ -66,10 +66,6 @@ void __init cpm2_reset(void) |
776 | cpm2_immr = ioremap(get_immrbase(), CPM_MAP_SIZE); |
777 | #endif |
778 | |
779 | - /* Reclaim the DP memory for our use. |
780 | - */ |
781 | - cpm_muram_init(); |
782 | - |
783 | /* Tell everyone where the comm processor resides. |
784 | */ |
785 | cpmp = &cpm2_immr->im_cpm; |
786 | diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c |
787 | index 947f42007734..51bf749a4f3a 100644 |
788 | --- a/arch/powerpc/sysdev/cpm_common.c |
789 | +++ b/arch/powerpc/sysdev/cpm_common.c |
790 | @@ -37,6 +37,21 @@ |
791 | #include <linux/of_gpio.h> |
792 | #endif |
793 | |
794 | +static int __init cpm_init(void) |
795 | +{ |
796 | + struct device_node *np; |
797 | + |
798 | + np = of_find_compatible_node(NULL, NULL, "fsl,cpm1"); |
799 | + if (!np) |
800 | + np = of_find_compatible_node(NULL, NULL, "fsl,cpm2"); |
801 | + if (!np) |
802 | + return -ENODEV; |
803 | + cpm_muram_init(); |
804 | + of_node_put(np); |
805 | + return 0; |
806 | +} |
807 | +subsys_initcall(cpm_init); |
808 | + |
809 | #ifdef CONFIG_PPC_EARLY_DEBUG_CPM |
810 | static u32 __iomem *cpm_udbg_txdesc; |
811 | static u8 __iomem *cpm_udbg_txbuf; |
812 | diff --git a/arch/powerpc/xmon/spr_access.S b/arch/powerpc/xmon/spr_access.S |
813 | index 84ad74213c83..7d8b0e8ed6d9 100644 |
814 | --- a/arch/powerpc/xmon/spr_access.S |
815 | +++ b/arch/powerpc/xmon/spr_access.S |
816 | @@ -2,12 +2,12 @@ |
817 | |
818 | /* unsigned long xmon_mfspr(sprn, default_value) */ |
819 | _GLOBAL(xmon_mfspr) |
820 | - ld r5, .Lmfspr_table@got(r2) |
821 | + PPC_LL r5, .Lmfspr_table@got(r2) |
822 | b xmon_mxspr |
823 | |
824 | /* void xmon_mtspr(sprn, new_value) */ |
825 | _GLOBAL(xmon_mtspr) |
826 | - ld r5, .Lmtspr_table@got(r2) |
827 | + PPC_LL r5, .Lmtspr_table@got(r2) |
828 | b xmon_mxspr |
829 | |
830 | /* |
831 | diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c |
832 | index dfd0ca2638fa..9746b780ad5a 100644 |
833 | --- a/arch/s390/kvm/intercept.c |
834 | +++ b/arch/s390/kvm/intercept.c |
835 | @@ -118,8 +118,13 @@ static int handle_validity(struct kvm_vcpu *vcpu) |
836 | |
837 | vcpu->stat.exit_validity++; |
838 | trace_kvm_s390_intercept_validity(vcpu, viwhy); |
839 | - WARN_ONCE(true, "kvm: unhandled validity intercept 0x%x\n", viwhy); |
840 | - return -EOPNOTSUPP; |
841 | + KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy, |
842 | + current->pid, vcpu->kvm); |
843 | + |
844 | + /* do not warn on invalid runtime instrumentation mode */ |
845 | + WARN_ONCE(viwhy != 0x44, "kvm: unhandled validity intercept 0x%x\n", |
846 | + viwhy); |
847 | + return -EINVAL; |
848 | } |
849 | |
850 | static int handle_instruction(struct kvm_vcpu *vcpu) |
851 | diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c |
852 | index 8a90f1517837..625eb698c780 100644 |
853 | --- a/arch/x86/kernel/e820.c |
854 | +++ b/arch/x86/kernel/e820.c |
855 | @@ -348,7 +348,7 @@ int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, |
856 | * continue building up new bios map based on this |
857 | * information |
858 | */ |
859 | - if (current_type != last_type) { |
860 | + if (current_type != last_type || current_type == E820_PRAM) { |
861 | if (last_type != 0) { |
862 | new_bios[new_bios_entry].size = |
863 | change_point[chgidx]->addr - last_addr; |
864 | diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c |
865 | index 82b17373b66a..9e152cdab0f3 100644 |
866 | --- a/arch/x86/kernel/smpboot.c |
867 | +++ b/arch/x86/kernel/smpboot.c |
868 | @@ -1408,15 +1408,17 @@ __init void prefill_possible_map(void) |
869 | |
870 | /* No boot processor was found in mptable or ACPI MADT */ |
871 | if (!num_processors) { |
872 | - int apicid = boot_cpu_physical_apicid; |
873 | - int cpu = hard_smp_processor_id(); |
874 | + if (boot_cpu_has(X86_FEATURE_APIC)) { |
875 | + int apicid = boot_cpu_physical_apicid; |
876 | + int cpu = hard_smp_processor_id(); |
877 | |
878 | - pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu); |
879 | + pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu); |
880 | |
881 | - /* Make sure boot cpu is enumerated */ |
882 | - if (apic->cpu_present_to_apicid(0) == BAD_APICID && |
883 | - apic->apic_id_valid(apicid)) |
884 | - generic_processor_info(apicid, boot_cpu_apic_version); |
885 | + /* Make sure boot cpu is enumerated */ |
886 | + if (apic->cpu_present_to_apicid(0) == BAD_APICID && |
887 | + apic->apic_id_valid(apicid)) |
888 | + generic_processor_info(apicid, boot_cpu_apic_version); |
889 | + } |
890 | |
891 | if (!num_processors) |
892 | num_processors = 1; |
893 | diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c |
894 | index c7220ba94aa7..1a22de70f7f7 100644 |
895 | --- a/arch/x86/kvm/ioapic.c |
896 | +++ b/arch/x86/kvm/ioapic.c |
897 | @@ -594,7 +594,7 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic) |
898 | ioapic->irr = 0; |
899 | ioapic->irr_delivered = 0; |
900 | ioapic->id = 0; |
901 | - memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS); |
902 | + memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi)); |
903 | rtc_irq_eoi_tracking_reset(ioapic); |
904 | } |
905 | |
906 | diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c |
907 | index 23f2f3e41c7f..58e152b3bd90 100644 |
908 | --- a/arch/x86/platform/uv/bios_uv.c |
909 | +++ b/arch/x86/platform/uv/bios_uv.c |
910 | @@ -40,7 +40,15 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) |
911 | */ |
912 | return BIOS_STATUS_UNIMPLEMENTED; |
913 | |
914 | - ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5); |
915 | + /* |
916 | + * If EFI_OLD_MEMMAP is set, we need to fall back to using our old EFI |
917 | + * callback method, which uses efi_call() directly, with the kernel page tables: |
918 | + */ |
919 | + if (unlikely(test_bit(EFI_OLD_MEMMAP, &efi.flags))) |
920 | + ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5); |
921 | + else |
922 | + ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5); |
923 | + |
924 | return ret; |
925 | } |
926 | EXPORT_SYMBOL_GPL(uv_bios_call); |
927 | diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c |
928 | index dd38e5ced4a3..b08ccbb9393a 100644 |
929 | --- a/block/blk-cgroup.c |
930 | +++ b/block/blk-cgroup.c |
931 | @@ -1340,10 +1340,8 @@ int blkcg_policy_register(struct blkcg_policy *pol) |
932 | struct blkcg_policy_data *cpd; |
933 | |
934 | cpd = pol->cpd_alloc_fn(GFP_KERNEL); |
935 | - if (!cpd) { |
936 | - mutex_unlock(&blkcg_pol_mutex); |
937 | + if (!cpd) |
938 | goto err_free_cpds; |
939 | - } |
940 | |
941 | blkcg->cpd[pol->plid] = cpd; |
942 | cpd->blkcg = blkcg; |
943 | diff --git a/drivers/base/platform.c b/drivers/base/platform.c |
944 | index 6482d47deb50..d5572295cad3 100644 |
945 | --- a/drivers/base/platform.c |
946 | +++ b/drivers/base/platform.c |
947 | @@ -97,7 +97,7 @@ int platform_get_irq(struct platform_device *dev, unsigned int num) |
948 | int ret; |
949 | |
950 | ret = of_irq_get(dev->dev.of_node, num); |
951 | - if (ret >= 0 || ret == -EPROBE_DEFER) |
952 | + if (ret > 0 || ret == -EPROBE_DEFER) |
953 | return ret; |
954 | } |
955 | |
956 | @@ -175,7 +175,7 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name) |
957 | int ret; |
958 | |
959 | ret = of_irq_get_byname(dev->dev.of_node, name); |
960 | - if (ret >= 0 || ret == -EPROBE_DEFER) |
961 | + if (ret > 0 || ret == -EPROBE_DEFER) |
962 | return ret; |
963 | } |
964 | |
965 | diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c |
966 | index ba1c1ae72ac2..ce8ea10407e4 100644 |
967 | --- a/drivers/clk/imx/clk-imx6q.c |
968 | +++ b/drivers/clk/imx/clk-imx6q.c |
969 | @@ -318,11 +318,16 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) |
970 | clk[IMX6QDL_CLK_IPG_PER_SEL] = imx_clk_mux("ipg_per_sel", base + 0x1c, 6, 1, ipg_per_sels, ARRAY_SIZE(ipg_per_sels)); |
971 | clk[IMX6QDL_CLK_UART_SEL] = imx_clk_mux("uart_sel", base + 0x24, 6, 1, uart_sels, ARRAY_SIZE(uart_sels)); |
972 | clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels_2, ARRAY_SIZE(gpu2d_core_sels_2)); |
973 | + } else if (clk_on_imx6dl()) { |
974 | + clk[IMX6QDL_CLK_MLB_SEL] = imx_clk_mux("mlb_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels)); |
975 | } else { |
976 | clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels)); |
977 | } |
978 | clk[IMX6QDL_CLK_GPU3D_CORE_SEL] = imx_clk_mux("gpu3d_core_sel", base + 0x18, 4, 2, gpu3d_core_sels, ARRAY_SIZE(gpu3d_core_sels)); |
979 | - clk[IMX6QDL_CLK_GPU3D_SHADER_SEL] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels)); |
980 | + if (clk_on_imx6dl()) |
981 | + clk[IMX6QDL_CLK_GPU2D_CORE_SEL] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels)); |
982 | + else |
983 | + clk[IMX6QDL_CLK_GPU3D_SHADER_SEL] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels)); |
984 | clk[IMX6QDL_CLK_IPU1_SEL] = imx_clk_mux("ipu1_sel", base + 0x3c, 9, 2, ipu_sels, ARRAY_SIZE(ipu_sels)); |
985 | clk[IMX6QDL_CLK_IPU2_SEL] = imx_clk_mux("ipu2_sel", base + 0x3c, 14, 2, ipu_sels, ARRAY_SIZE(ipu_sels)); |
986 | clk[IMX6QDL_CLK_LDB_DI0_SEL] = imx_clk_mux_flags("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT); |
987 | @@ -400,9 +405,15 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) |
988 | clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); |
989 | clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7); |
990 | } |
991 | - clk[IMX6QDL_CLK_GPU2D_CORE_PODF] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 23, 3); |
992 | + if (clk_on_imx6dl()) |
993 | + clk[IMX6QDL_CLK_MLB_PODF] = imx_clk_divider("mlb_podf", "mlb_sel", base + 0x18, 23, 3); |
994 | + else |
995 | + clk[IMX6QDL_CLK_GPU2D_CORE_PODF] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 23, 3); |
996 | clk[IMX6QDL_CLK_GPU3D_CORE_PODF] = imx_clk_divider("gpu3d_core_podf", "gpu3d_core_sel", base + 0x18, 26, 3); |
997 | - clk[IMX6QDL_CLK_GPU3D_SHADER] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3); |
998 | + if (clk_on_imx6dl()) |
999 | + clk[IMX6QDL_CLK_GPU2D_CORE_PODF] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 29, 3); |
1000 | + else |
1001 | + clk[IMX6QDL_CLK_GPU3D_SHADER] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3); |
1002 | clk[IMX6QDL_CLK_IPU1_PODF] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3); |
1003 | clk[IMX6QDL_CLK_IPU2_PODF] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3); |
1004 | clk[IMX6QDL_CLK_LDB_DI0_PODF] = imx_clk_divider_flags("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1, 0); |
1005 | @@ -473,14 +484,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) |
1006 | clk[IMX6QDL_CLK_ESAI_MEM] = imx_clk_gate2_shared("esai_mem", "ahb", base + 0x6c, 16, &share_count_esai); |
1007 | clk[IMX6QDL_CLK_GPT_IPG] = imx_clk_gate2("gpt_ipg", "ipg", base + 0x6c, 20); |
1008 | clk[IMX6QDL_CLK_GPT_IPG_PER] = imx_clk_gate2("gpt_ipg_per", "ipg_per", base + 0x6c, 22); |
1009 | - if (clk_on_imx6dl()) |
1010 | - /* |
1011 | - * The multiplexer and divider of imx6q clock gpu3d_shader get |
1012 | - * redefined/reused as gpu2d_core_sel and gpu2d_core_podf on imx6dl. |
1013 | - */ |
1014 | - clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu3d_shader", base + 0x6c, 24); |
1015 | - else |
1016 | - clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24); |
1017 | + clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24); |
1018 | clk[IMX6QDL_CLK_GPU3D_CORE] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26); |
1019 | clk[IMX6QDL_CLK_HDMI_IAHB] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0); |
1020 | clk[IMX6QDL_CLK_HDMI_ISFR] = imx_clk_gate2("hdmi_isfr", "video_27m", base + 0x70, 4); |
1021 | @@ -511,7 +515,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) |
1022 | * The multiplexer and divider of the imx6q clock gpu2d get |
1023 | * redefined/reused as mlb_sys_sel and mlb_sys_clk_podf on imx6dl. |
1024 | */ |
1025 | - clk[IMX6QDL_CLK_MLB] = imx_clk_gate2("mlb", "gpu2d_core_podf", base + 0x74, 18); |
1026 | + clk[IMX6QDL_CLK_MLB] = imx_clk_gate2("mlb", "mlb_podf", base + 0x74, 18); |
1027 | else |
1028 | clk[IMX6QDL_CLK_MLB] = imx_clk_gate2("mlb", "axi", base + 0x74, 18); |
1029 | clk[IMX6QDL_CLK_MMDC_CH0_AXI] = imx_clk_gate2("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20); |
1030 | @@ -629,6 +633,24 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node) |
1031 | if (IS_ENABLED(CONFIG_PCI_IMX6)) |
1032 | clk_set_parent(clk[IMX6QDL_CLK_LVDS1_SEL], clk[IMX6QDL_CLK_SATA_REF_100M]); |
1033 | |
1034 | + /* |
1035 | + * Initialize the GPU clock muxes, so that the maximum specified clock |
1036 | + * rates for the respective SoC are not exceeded. |
1037 | + */ |
1038 | + if (clk_on_imx6dl()) { |
1039 | + clk_set_parent(clk[IMX6QDL_CLK_GPU3D_CORE_SEL], |
1040 | + clk[IMX6QDL_CLK_PLL2_PFD1_594M]); |
1041 | + clk_set_parent(clk[IMX6QDL_CLK_GPU2D_CORE_SEL], |
1042 | + clk[IMX6QDL_CLK_PLL2_PFD1_594M]); |
1043 | + } else if (clk_on_imx6q()) { |
1044 | + clk_set_parent(clk[IMX6QDL_CLK_GPU3D_CORE_SEL], |
1045 | + clk[IMX6QDL_CLK_MMDC_CH0_AXI]); |
1046 | + clk_set_parent(clk[IMX6QDL_CLK_GPU3D_SHADER_SEL], |
1047 | + clk[IMX6QDL_CLK_PLL2_PFD1_594M]); |
1048 | + clk_set_parent(clk[IMX6QDL_CLK_GPU2D_CORE_SEL], |
1049 | + clk[IMX6QDL_CLK_PLL3_USB_OTG]); |
1050 | + } |
1051 | + |
1052 | imx_register_uart_clocks(uart_clks); |
1053 | } |
1054 | CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init); |
1055 | diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c |
1056 | index 2ee40fd360ca..e1aa531a4c34 100644 |
1057 | --- a/drivers/cpufreq/cpufreq-dt-platdev.c |
1058 | +++ b/drivers/cpufreq/cpufreq-dt-platdev.c |
1059 | @@ -68,6 +68,8 @@ static const struct of_device_id machines[] __initconst = { |
1060 | |
1061 | { .compatible = "sigma,tango4" }, |
1062 | |
1063 | + { .compatible = "ti,am33xx", }, |
1064 | + { .compatible = "ti,dra7", }, |
1065 | { .compatible = "ti,omap2", }, |
1066 | { .compatible = "ti,omap3", }, |
1067 | { .compatible = "ti,omap4", }, |
1068 | diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c |
1069 | index 18da4f8051d3..13475890d792 100644 |
1070 | --- a/drivers/cpufreq/cpufreq_conservative.c |
1071 | +++ b/drivers/cpufreq/cpufreq_conservative.c |
1072 | @@ -17,6 +17,7 @@ |
1073 | struct cs_policy_dbs_info { |
1074 | struct policy_dbs_info policy_dbs; |
1075 | unsigned int down_skip; |
1076 | + unsigned int requested_freq; |
1077 | }; |
1078 | |
1079 | static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs) |
1080 | @@ -61,6 +62,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy) |
1081 | { |
1082 | struct policy_dbs_info *policy_dbs = policy->governor_data; |
1083 | struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs); |
1084 | + unsigned int requested_freq = dbs_info->requested_freq; |
1085 | struct dbs_data *dbs_data = policy_dbs->dbs_data; |
1086 | struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; |
1087 | unsigned int load = dbs_update(policy); |
1088 | @@ -72,10 +74,16 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy) |
1089 | if (cs_tuners->freq_step == 0) |
1090 | goto out; |
1091 | |
1092 | + /* |
1093 | + * If requested_freq is out of range, it is likely that the limits |
1094 | + * changed in the meantime, so fall back to current frequency in that |
1095 | + * case. |
1096 | + */ |
1097 | + if (requested_freq > policy->max || requested_freq < policy->min) |
1098 | + requested_freq = policy->cur; |
1099 | + |
1100 | /* Check for frequency increase */ |
1101 | if (load > dbs_data->up_threshold) { |
1102 | - unsigned int requested_freq = policy->cur; |
1103 | - |
1104 | dbs_info->down_skip = 0; |
1105 | |
1106 | /* if we are already at full speed then break out early */ |
1107 | @@ -83,8 +91,11 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy) |
1108 | goto out; |
1109 | |
1110 | requested_freq += get_freq_target(cs_tuners, policy); |
1111 | + if (requested_freq > policy->max) |
1112 | + requested_freq = policy->max; |
1113 | |
1114 | __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H); |
1115 | + dbs_info->requested_freq = requested_freq; |
1116 | goto out; |
1117 | } |
1118 | |
1119 | @@ -95,7 +106,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy) |
1120 | |
1121 | /* Check for frequency decrease */ |
1122 | if (load < cs_tuners->down_threshold) { |
1123 | - unsigned int freq_target, requested_freq = policy->cur; |
1124 | + unsigned int freq_target; |
1125 | /* |
1126 | * if we cannot reduce the frequency anymore, break out early |
1127 | */ |
1128 | @@ -109,6 +120,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy) |
1129 | requested_freq = policy->min; |
1130 | |
1131 | __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L); |
1132 | + dbs_info->requested_freq = requested_freq; |
1133 | } |
1134 | |
1135 | out: |
1136 | @@ -287,6 +299,7 @@ static void cs_start(struct cpufreq_policy *policy) |
1137 | struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data); |
1138 | |
1139 | dbs_info->down_skip = 0; |
1140 | + dbs_info->requested_freq = policy->cur; |
1141 | } |
1142 | |
1143 | static struct dbs_governor cs_governor = { |
1144 | diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c |
1145 | index be9eade147f2..b46547e907be 100644 |
1146 | --- a/drivers/cpufreq/intel_pstate.c |
1147 | +++ b/drivers/cpufreq/intel_pstate.c |
1148 | @@ -556,12 +556,12 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask) |
1149 | int min, hw_min, max, hw_max, cpu, range, adj_range; |
1150 | u64 value, cap; |
1151 | |
1152 | - rdmsrl(MSR_HWP_CAPABILITIES, cap); |
1153 | - hw_min = HWP_LOWEST_PERF(cap); |
1154 | - hw_max = HWP_HIGHEST_PERF(cap); |
1155 | - range = hw_max - hw_min; |
1156 | - |
1157 | for_each_cpu(cpu, cpumask) { |
1158 | + rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); |
1159 | + hw_min = HWP_LOWEST_PERF(cap); |
1160 | + hw_max = HWP_HIGHEST_PERF(cap); |
1161 | + range = hw_max - hw_min; |
1162 | + |
1163 | rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); |
1164 | adj_range = limits->min_perf_pct * range / 100; |
1165 | min = hw_min + adj_range; |
1166 | diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c |
1167 | index 425501c39527..793518a30afe 100644 |
1168 | --- a/drivers/gpio/gpio-mpc8xxx.c |
1169 | +++ b/drivers/gpio/gpio-mpc8xxx.c |
1170 | @@ -239,7 +239,7 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq, |
1171 | irq_hw_number_t hwirq) |
1172 | { |
1173 | irq_set_chip_data(irq, h->host_data); |
1174 | - irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq); |
1175 | + irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_edge_irq); |
1176 | |
1177 | return 0; |
1178 | } |
1179 | diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c |
1180 | index f2b776efab3a..5f88ccd6806b 100644 |
1181 | --- a/drivers/infiniband/core/verbs.c |
1182 | +++ b/drivers/infiniband/core/verbs.c |
1183 | @@ -821,7 +821,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, |
1184 | if (ret) { |
1185 | pr_err("failed to init MR pool ret= %d\n", ret); |
1186 | ib_destroy_qp(qp); |
1187 | - qp = ERR_PTR(ret); |
1188 | + return ERR_PTR(ret); |
1189 | } |
1190 | } |
1191 | |
1192 | diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c |
1193 | index 3322ed750172..6b07d4bca764 100644 |
1194 | --- a/drivers/infiniband/ulp/srp/ib_srp.c |
1195 | +++ b/drivers/infiniband/ulp/srp/ib_srp.c |
1196 | @@ -1400,7 +1400,9 @@ static int srp_map_sg_entry(struct srp_map_state *state, |
1197 | |
1198 | while (dma_len) { |
1199 | unsigned offset = dma_addr & ~dev->mr_page_mask; |
1200 | - if (state->npages == dev->max_pages_per_mr || offset != 0) { |
1201 | + |
1202 | + if (state->npages == dev->max_pages_per_mr || |
1203 | + (state->npages > 0 && offset != 0)) { |
1204 | ret = srp_map_finish_fmr(state, ch); |
1205 | if (ret) |
1206 | return ret; |
1207 | @@ -1417,12 +1419,12 @@ static int srp_map_sg_entry(struct srp_map_state *state, |
1208 | } |
1209 | |
1210 | /* |
1211 | - * If the last entry of the MR wasn't a full page, then we need to |
1212 | + * If the end of the MR is not on a page boundary then we need to |
1213 | * close it out and start a new one -- we can only merge at page |
1214 | * boundaries. |
1215 | */ |
1216 | ret = 0; |
1217 | - if (len != dev->mr_page_size) |
1218 | + if ((dma_addr & ~dev->mr_page_mask) != 0) |
1219 | ret = srp_map_finish_fmr(state, ch); |
1220 | return ret; |
1221 | } |
1222 | diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c |
1223 | index 08e252a42480..ff8c10749e57 100644 |
1224 | --- a/drivers/input/mouse/elantech.c |
1225 | +++ b/drivers/input/mouse/elantech.c |
1226 | @@ -1159,6 +1159,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { |
1227 | DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"), |
1228 | }, |
1229 | }, |
1230 | + { |
1231 | + /* Fujitsu H760 also has a middle button */ |
1232 | + .matches = { |
1233 | + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), |
1234 | + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), |
1235 | + }, |
1236 | + }, |
1237 | #endif |
1238 | { } |
1239 | }; |
1240 | @@ -1503,10 +1510,10 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { |
1241 | }, |
1242 | }, |
1243 | { |
1244 | - /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */ |
1245 | + /* Fujitsu H760 does not work with crc_enabled == 0 */ |
1246 | .matches = { |
1247 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), |
1248 | - DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"), |
1249 | + DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), |
1250 | }, |
1251 | }, |
1252 | { |
1253 | @@ -1517,6 +1524,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = { |
1254 | }, |
1255 | }, |
1256 | { |
1257 | + /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */ |
1258 | + .matches = { |
1259 | + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), |
1260 | + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"), |
1261 | + }, |
1262 | + }, |
1263 | + { |
1264 | + /* Fujitsu LIFEBOOK E556 does not work with crc_enabled == 0 */ |
1265 | + .matches = { |
1266 | + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), |
1267 | + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"), |
1268 | + }, |
1269 | + }, |
1270 | + { |
1271 | /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */ |
1272 | .matches = { |
1273 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), |
1274 | diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h |
1275 | index a5eed2ade53d..34da81c006b6 100644 |
1276 | --- a/drivers/input/serio/i8042-io.h |
1277 | +++ b/drivers/input/serio/i8042-io.h |
1278 | @@ -81,7 +81,7 @@ static inline int i8042_platform_init(void) |
1279 | return -EBUSY; |
1280 | #endif |
1281 | |
1282 | - i8042_reset = 1; |
1283 | + i8042_reset = I8042_RESET_ALWAYS; |
1284 | return 0; |
1285 | } |
1286 | |
1287 | diff --git a/drivers/input/serio/i8042-ip22io.h b/drivers/input/serio/i8042-ip22io.h |
1288 | index ee1ad27d6ed0..08a1c10a1448 100644 |
1289 | --- a/drivers/input/serio/i8042-ip22io.h |
1290 | +++ b/drivers/input/serio/i8042-ip22io.h |
1291 | @@ -61,7 +61,7 @@ static inline int i8042_platform_init(void) |
1292 | return -EBUSY; |
1293 | #endif |
1294 | |
1295 | - i8042_reset = 1; |
1296 | + i8042_reset = I8042_RESET_ALWAYS; |
1297 | |
1298 | return 0; |
1299 | } |
1300 | diff --git a/drivers/input/serio/i8042-ppcio.h b/drivers/input/serio/i8042-ppcio.h |
1301 | index f708c75d16f1..1aabea43329e 100644 |
1302 | --- a/drivers/input/serio/i8042-ppcio.h |
1303 | +++ b/drivers/input/serio/i8042-ppcio.h |
1304 | @@ -44,7 +44,7 @@ static inline void i8042_write_command(int val) |
1305 | |
1306 | static inline int i8042_platform_init(void) |
1307 | { |
1308 | - i8042_reset = 1; |
1309 | + i8042_reset = I8042_RESET_ALWAYS; |
1310 | return 0; |
1311 | } |
1312 | |
1313 | diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h |
1314 | index afcd1c1a05b2..6231d63860ee 100644 |
1315 | --- a/drivers/input/serio/i8042-sparcio.h |
1316 | +++ b/drivers/input/serio/i8042-sparcio.h |
1317 | @@ -130,7 +130,7 @@ static int __init i8042_platform_init(void) |
1318 | } |
1319 | } |
1320 | |
1321 | - i8042_reset = 1; |
1322 | + i8042_reset = I8042_RESET_ALWAYS; |
1323 | |
1324 | return 0; |
1325 | } |
1326 | diff --git a/drivers/input/serio/i8042-unicore32io.h b/drivers/input/serio/i8042-unicore32io.h |
1327 | index 73f5cc124a36..455747552f85 100644 |
1328 | --- a/drivers/input/serio/i8042-unicore32io.h |
1329 | +++ b/drivers/input/serio/i8042-unicore32io.h |
1330 | @@ -61,7 +61,7 @@ static inline int i8042_platform_init(void) |
1331 | if (!request_mem_region(I8042_REGION_START, I8042_REGION_SIZE, "i8042")) |
1332 | return -EBUSY; |
1333 | |
1334 | - i8042_reset = 1; |
1335 | + i8042_reset = I8042_RESET_ALWAYS; |
1336 | return 0; |
1337 | } |
1338 | |
1339 | diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h |
1340 | index 68f5f4a0f1e7..f4bfb4b2d50a 100644 |
1341 | --- a/drivers/input/serio/i8042-x86ia64io.h |
1342 | +++ b/drivers/input/serio/i8042-x86ia64io.h |
1343 | @@ -510,6 +510,90 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { |
1344 | { } |
1345 | }; |
1346 | |
1347 | +/* |
1348 | + * On some Asus laptops, just running self tests cause problems. |
1349 | + */ |
1350 | +static const struct dmi_system_id i8042_dmi_noselftest_table[] = { |
1351 | + { |
1352 | + .matches = { |
1353 | + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), |
1354 | + DMI_MATCH(DMI_PRODUCT_NAME, "A455LD"), |
1355 | + }, |
1356 | + }, |
1357 | + { |
1358 | + .matches = { |
1359 | + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), |
1360 | + DMI_MATCH(DMI_PRODUCT_NAME, "K401LB"), |
1361 | + }, |
1362 | + }, |
1363 | + { |
1364 | + .matches = { |
1365 | + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), |
1366 | + DMI_MATCH(DMI_PRODUCT_NAME, "K501LB"), |
1367 | + }, |
1368 | + }, |
1369 | + { |
1370 | + .matches = { |
1371 | + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), |
1372 | + DMI_MATCH(DMI_PRODUCT_NAME, "K501LX"), |
1373 | + }, |
1374 | + }, |
1375 | + { |
1376 | + .matches = { |
1377 | + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), |
1378 | + DMI_MATCH(DMI_PRODUCT_NAME, "R409L"), |
1379 | + }, |
1380 | + }, |
1381 | + { |
1382 | + .matches = { |
1383 | + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), |
1384 | + DMI_MATCH(DMI_PRODUCT_NAME, "V502LX"), |
1385 | + }, |
1386 | + }, |
1387 | + { |
1388 | + .matches = { |
1389 | + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), |
1390 | + DMI_MATCH(DMI_PRODUCT_NAME, "X302LA"), |
1391 | + }, |
1392 | + }, |
1393 | + { |
1394 | + .matches = { |
1395 | + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), |
1396 | + DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"), |
1397 | + }, |
1398 | + }, |
1399 | + { |
1400 | + .matches = { |
1401 | + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), |
1402 | + DMI_MATCH(DMI_PRODUCT_NAME, "X450LD"), |
1403 | + }, |
1404 | + }, |
1405 | + { |
1406 | + .matches = { |
1407 | + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), |
1408 | + DMI_MATCH(DMI_PRODUCT_NAME, "X455LAB"), |
1409 | + }, |
1410 | + }, |
1411 | + { |
1412 | + .matches = { |
1413 | + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), |
1414 | + DMI_MATCH(DMI_PRODUCT_NAME, "X455LDB"), |
1415 | + }, |
1416 | + }, |
1417 | + { |
1418 | + .matches = { |
1419 | + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), |
1420 | + DMI_MATCH(DMI_PRODUCT_NAME, "X455LF"), |
1421 | + }, |
1422 | + }, |
1423 | + { |
1424 | + .matches = { |
1425 | + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), |
1426 | + DMI_MATCH(DMI_PRODUCT_NAME, "Z450LA"), |
1427 | + }, |
1428 | + }, |
1429 | + { } |
1430 | +}; |
1431 | static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { |
1432 | { |
1433 | /* MSI Wind U-100 */ |
1434 | @@ -1072,12 +1156,18 @@ static int __init i8042_platform_init(void) |
1435 | return retval; |
1436 | |
1437 | #if defined(__ia64__) |
1438 | - i8042_reset = true; |
1439 | + i8042_reset = I8042_RESET_ALWAYS; |
1440 | #endif |
1441 | |
1442 | #ifdef CONFIG_X86 |
1443 | - if (dmi_check_system(i8042_dmi_reset_table)) |
1444 | - i8042_reset = true; |
1445 | + /* Honor module parameter when value is not default */ |
1446 | + if (i8042_reset == I8042_RESET_DEFAULT) { |
1447 | + if (dmi_check_system(i8042_dmi_reset_table)) |
1448 | + i8042_reset = I8042_RESET_ALWAYS; |
1449 | + |
1450 | + if (dmi_check_system(i8042_dmi_noselftest_table)) |
1451 | + i8042_reset = I8042_RESET_NEVER; |
1452 | + } |
1453 | |
1454 | if (dmi_check_system(i8042_dmi_noloop_table)) |
1455 | i8042_noloop = true; |
1456 | diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c |
1457 | index 405252a884dd..89abfdb539ac 100644 |
1458 | --- a/drivers/input/serio/i8042.c |
1459 | +++ b/drivers/input/serio/i8042.c |
1460 | @@ -48,9 +48,39 @@ static bool i8042_unlock; |
1461 | module_param_named(unlock, i8042_unlock, bool, 0); |
1462 | MODULE_PARM_DESC(unlock, "Ignore keyboard lock."); |
1463 | |
1464 | -static bool i8042_reset; |
1465 | -module_param_named(reset, i8042_reset, bool, 0); |
1466 | -MODULE_PARM_DESC(reset, "Reset controller during init and cleanup."); |
1467 | +enum i8042_controller_reset_mode { |
1468 | + I8042_RESET_NEVER, |
1469 | + I8042_RESET_ALWAYS, |
1470 | + I8042_RESET_ON_S2RAM, |
1471 | +#define I8042_RESET_DEFAULT I8042_RESET_ON_S2RAM |
1472 | +}; |
1473 | +static enum i8042_controller_reset_mode i8042_reset = I8042_RESET_DEFAULT; |
1474 | +static int i8042_set_reset(const char *val, const struct kernel_param *kp) |
1475 | +{ |
1476 | + enum i8042_controller_reset_mode *arg = kp->arg; |
1477 | + int error; |
1478 | + bool reset; |
1479 | + |
1480 | + if (val) { |
1481 | + error = kstrtobool(val, &reset); |
1482 | + if (error) |
1483 | + return error; |
1484 | + } else { |
1485 | + reset = true; |
1486 | + } |
1487 | + |
1488 | + *arg = reset ? I8042_RESET_ALWAYS : I8042_RESET_NEVER; |
1489 | + return 0; |
1490 | +} |
1491 | + |
1492 | +static const struct kernel_param_ops param_ops_reset_param = { |
1493 | + .flags = KERNEL_PARAM_OPS_FL_NOARG, |
1494 | + .set = i8042_set_reset, |
1495 | +}; |
1496 | +#define param_check_reset_param(name, p) \ |
1497 | + __param_check(name, p, enum i8042_controller_reset_mode) |
1498 | +module_param_named(reset, i8042_reset, reset_param, 0); |
1499 | +MODULE_PARM_DESC(reset, "Reset controller on resume, cleanup or both"); |
1500 | |
1501 | static bool i8042_direct; |
1502 | module_param_named(direct, i8042_direct, bool, 0); |
1503 | @@ -1019,7 +1049,7 @@ static int i8042_controller_init(void) |
1504 | * Reset the controller and reset CRT to the original value set by BIOS. |
1505 | */ |
1506 | |
1507 | -static void i8042_controller_reset(bool force_reset) |
1508 | +static void i8042_controller_reset(bool s2r_wants_reset) |
1509 | { |
1510 | i8042_flush(); |
1511 | |
1512 | @@ -1044,8 +1074,10 @@ static void i8042_controller_reset(bool force_reset) |
1513 | * Reset the controller if requested. |
1514 | */ |
1515 | |
1516 | - if (i8042_reset || force_reset) |
1517 | + if (i8042_reset == I8042_RESET_ALWAYS || |
1518 | + (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) { |
1519 | i8042_controller_selftest(); |
1520 | + } |
1521 | |
1522 | /* |
1523 | * Restore the original control register setting. |
1524 | @@ -1110,7 +1142,7 @@ static void i8042_dritek_enable(void) |
1525 | * before suspending. |
1526 | */ |
1527 | |
1528 | -static int i8042_controller_resume(bool force_reset) |
1529 | +static int i8042_controller_resume(bool s2r_wants_reset) |
1530 | { |
1531 | int error; |
1532 | |
1533 | @@ -1118,7 +1150,8 @@ static int i8042_controller_resume(bool force_reset) |
1534 | if (error) |
1535 | return error; |
1536 | |
1537 | - if (i8042_reset || force_reset) { |
1538 | + if (i8042_reset == I8042_RESET_ALWAYS || |
1539 | + (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) { |
1540 | error = i8042_controller_selftest(); |
1541 | if (error) |
1542 | return error; |
1543 | @@ -1195,7 +1228,7 @@ static int i8042_pm_resume_noirq(struct device *dev) |
1544 | |
1545 | static int i8042_pm_resume(struct device *dev) |
1546 | { |
1547 | - bool force_reset; |
1548 | + bool want_reset; |
1549 | int i; |
1550 | |
1551 | for (i = 0; i < I8042_NUM_PORTS; i++) { |
1552 | @@ -1218,9 +1251,9 @@ static int i8042_pm_resume(struct device *dev) |
1553 | * off control to the platform firmware, otherwise we can simply restore |
1554 | * the mode. |
1555 | */ |
1556 | - force_reset = pm_resume_via_firmware(); |
1557 | + want_reset = pm_resume_via_firmware(); |
1558 | |
1559 | - return i8042_controller_resume(force_reset); |
1560 | + return i8042_controller_resume(want_reset); |
1561 | } |
1562 | |
1563 | static int i8042_pm_thaw(struct device *dev) |
1564 | @@ -1482,7 +1515,7 @@ static int __init i8042_probe(struct platform_device *dev) |
1565 | |
1566 | i8042_platform_device = dev; |
1567 | |
1568 | - if (i8042_reset) { |
1569 | + if (i8042_reset == I8042_RESET_ALWAYS) { |
1570 | error = i8042_controller_selftest(); |
1571 | if (error) |
1572 | return error; |
1573 | diff --git a/drivers/irqchip/irq-eznps.c b/drivers/irqchip/irq-eznps.c |
1574 | index efbf0e4304b7..ebc2b0b15f67 100644 |
1575 | --- a/drivers/irqchip/irq-eznps.c |
1576 | +++ b/drivers/irqchip/irq-eznps.c |
1577 | @@ -85,7 +85,7 @@ static void nps400_irq_eoi_global(struct irq_data *irqd) |
1578 | nps_ack_gic(); |
1579 | } |
1580 | |
1581 | -static void nps400_irq_eoi(struct irq_data *irqd) |
1582 | +static void nps400_irq_ack(struct irq_data *irqd) |
1583 | { |
1584 | unsigned int __maybe_unused irq = irqd_to_hwirq(irqd); |
1585 | |
1586 | @@ -103,7 +103,7 @@ static struct irq_chip nps400_irq_chip_percpu = { |
1587 | .name = "NPS400 IC", |
1588 | .irq_mask = nps400_irq_mask, |
1589 | .irq_unmask = nps400_irq_unmask, |
1590 | - .irq_eoi = nps400_irq_eoi, |
1591 | + .irq_ack = nps400_irq_ack, |
1592 | }; |
1593 | |
1594 | static int nps400_irq_map(struct irq_domain *d, unsigned int virq, |
1595 | diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c |
1596 | index da6c0ba61d4f..708a2604a7b5 100644 |
1597 | --- a/drivers/irqchip/irq-gic-v3.c |
1598 | +++ b/drivers/irqchip/irq-gic-v3.c |
1599 | @@ -153,7 +153,7 @@ static void gic_enable_redist(bool enable) |
1600 | return; /* No PM support in this redistributor */ |
1601 | } |
1602 | |
1603 | - while (count--) { |
1604 | + while (--count) { |
1605 | val = readl_relaxed(rbase + GICR_WAKER); |
1606 | if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) |
1607 | break; |
1608 | diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c |
1609 | index 874295757caa..6fc8923bd92a 100644 |
1610 | --- a/drivers/md/dm-crypt.c |
1611 | +++ b/drivers/md/dm-crypt.c |
1612 | @@ -113,8 +113,7 @@ struct iv_tcw_private { |
1613 | * and encrypts / decrypts at the same time. |
1614 | */ |
1615 | enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, |
1616 | - DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD, |
1617 | - DM_CRYPT_EXIT_THREAD}; |
1618 | + DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD }; |
1619 | |
1620 | /* |
1621 | * The fields in here must be read only after initialization. |
1622 | @@ -1207,18 +1206,20 @@ continue_locked: |
1623 | if (!RB_EMPTY_ROOT(&cc->write_tree)) |
1624 | goto pop_from_list; |
1625 | |
1626 | - if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) { |
1627 | - spin_unlock_irq(&cc->write_thread_wait.lock); |
1628 | - break; |
1629 | - } |
1630 | - |
1631 | - __set_current_state(TASK_INTERRUPTIBLE); |
1632 | + set_current_state(TASK_INTERRUPTIBLE); |
1633 | __add_wait_queue(&cc->write_thread_wait, &wait); |
1634 | |
1635 | spin_unlock_irq(&cc->write_thread_wait.lock); |
1636 | |
1637 | + if (unlikely(kthread_should_stop())) { |
1638 | + set_task_state(current, TASK_RUNNING); |
1639 | + remove_wait_queue(&cc->write_thread_wait, &wait); |
1640 | + break; |
1641 | + } |
1642 | + |
1643 | schedule(); |
1644 | |
1645 | + set_task_state(current, TASK_RUNNING); |
1646 | spin_lock_irq(&cc->write_thread_wait.lock); |
1647 | __remove_wait_queue(&cc->write_thread_wait, &wait); |
1648 | goto continue_locked; |
1649 | @@ -1533,13 +1534,8 @@ static void crypt_dtr(struct dm_target *ti) |
1650 | if (!cc) |
1651 | return; |
1652 | |
1653 | - if (cc->write_thread) { |
1654 | - spin_lock_irq(&cc->write_thread_wait.lock); |
1655 | - set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags); |
1656 | - wake_up_locked(&cc->write_thread_wait); |
1657 | - spin_unlock_irq(&cc->write_thread_wait.lock); |
1658 | + if (cc->write_thread) |
1659 | kthread_stop(cc->write_thread); |
1660 | - } |
1661 | |
1662 | if (cc->io_queue) |
1663 | destroy_workqueue(cc->io_queue); |
1664 | diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c |
1665 | index ac734e5bbe48..15db5e9c572e 100644 |
1666 | --- a/drivers/md/dm-mpath.c |
1667 | +++ b/drivers/md/dm-mpath.c |
1668 | @@ -1521,10 +1521,10 @@ static void activate_path(struct work_struct *work) |
1669 | { |
1670 | struct pgpath *pgpath = |
1671 | container_of(work, struct pgpath, activate_path.work); |
1672 | + struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); |
1673 | |
1674 | - if (pgpath->is_active) |
1675 | - scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), |
1676 | - pg_init_done, pgpath); |
1677 | + if (pgpath->is_active && !blk_queue_dying(q)) |
1678 | + scsi_dh_activate(q, pg_init_done, pgpath); |
1679 | else |
1680 | pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED); |
1681 | } |
1682 | diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c |
1683 | index 1ca7463e8bb2..5da86c8b6545 100644 |
1684 | --- a/drivers/md/dm-rq.c |
1685 | +++ b/drivers/md/dm-rq.c |
1686 | @@ -73,15 +73,24 @@ static void dm_old_start_queue(struct request_queue *q) |
1687 | spin_unlock_irqrestore(q->queue_lock, flags); |
1688 | } |
1689 | |
1690 | +static void dm_mq_start_queue(struct request_queue *q) |
1691 | +{ |
1692 | + unsigned long flags; |
1693 | + |
1694 | + spin_lock_irqsave(q->queue_lock, flags); |
1695 | + queue_flag_clear(QUEUE_FLAG_STOPPED, q); |
1696 | + spin_unlock_irqrestore(q->queue_lock, flags); |
1697 | + |
1698 | + blk_mq_start_stopped_hw_queues(q, true); |
1699 | + blk_mq_kick_requeue_list(q); |
1700 | +} |
1701 | + |
1702 | void dm_start_queue(struct request_queue *q) |
1703 | { |
1704 | if (!q->mq_ops) |
1705 | dm_old_start_queue(q); |
1706 | - else { |
1707 | - queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, q); |
1708 | - blk_mq_start_stopped_hw_queues(q, true); |
1709 | - blk_mq_kick_requeue_list(q); |
1710 | - } |
1711 | + else |
1712 | + dm_mq_start_queue(q); |
1713 | } |
1714 | |
1715 | static void dm_old_stop_queue(struct request_queue *q) |
1716 | diff --git a/drivers/md/dm.c b/drivers/md/dm.c |
1717 | index fa9b1cb4438a..0f2928b3136b 100644 |
1718 | --- a/drivers/md/dm.c |
1719 | +++ b/drivers/md/dm.c |
1720 | @@ -1873,6 +1873,7 @@ EXPORT_SYMBOL_GPL(dm_device_name); |
1721 | |
1722 | static void __dm_destroy(struct mapped_device *md, bool wait) |
1723 | { |
1724 | + struct request_queue *q = dm_get_md_queue(md); |
1725 | struct dm_table *map; |
1726 | int srcu_idx; |
1727 | |
1728 | @@ -1883,6 +1884,10 @@ static void __dm_destroy(struct mapped_device *md, bool wait) |
1729 | set_bit(DMF_FREEING, &md->flags); |
1730 | spin_unlock(&_minor_lock); |
1731 | |
1732 | + spin_lock_irq(q->queue_lock); |
1733 | + queue_flag_set(QUEUE_FLAG_DYING, q); |
1734 | + spin_unlock_irq(q->queue_lock); |
1735 | + |
1736 | if (dm_request_based(md) && md->kworker_task) |
1737 | flush_kthread_worker(&md->kworker); |
1738 | |
1739 | @@ -2249,10 +2254,11 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map) |
1740 | |
1741 | int dm_resume(struct mapped_device *md) |
1742 | { |
1743 | - int r = -EINVAL; |
1744 | + int r; |
1745 | struct dm_table *map = NULL; |
1746 | |
1747 | retry: |
1748 | + r = -EINVAL; |
1749 | mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); |
1750 | |
1751 | if (!dm_suspended_md(md)) |
1752 | @@ -2276,8 +2282,6 @@ retry: |
1753 | goto out; |
1754 | |
1755 | clear_bit(DMF_SUSPENDED, &md->flags); |
1756 | - |
1757 | - r = 0; |
1758 | out: |
1759 | mutex_unlock(&md->suspend_lock); |
1760 | |
1761 | diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c |
1762 | index 41325328a22e..fe79358b035e 100644 |
1763 | --- a/drivers/media/dvb-frontends/mb86a20s.c |
1764 | +++ b/drivers/media/dvb-frontends/mb86a20s.c |
1765 | @@ -71,25 +71,27 @@ static struct regdata mb86a20s_init1[] = { |
1766 | }; |
1767 | |
1768 | static struct regdata mb86a20s_init2[] = { |
1769 | - { 0x28, 0x22 }, { 0x29, 0x00 }, { 0x2a, 0x1f }, { 0x2b, 0xf0 }, |
1770 | + { 0x50, 0xd1 }, { 0x51, 0x22 }, |
1771 | + { 0x39, 0x01 }, |
1772 | + { 0x71, 0x00 }, |
1773 | { 0x3b, 0x21 }, |
1774 | - { 0x3c, 0x38 }, |
1775 | + { 0x3c, 0x3a }, |
1776 | { 0x01, 0x0d }, |
1777 | - { 0x04, 0x08 }, { 0x05, 0x03 }, |
1778 | + { 0x04, 0x08 }, { 0x05, 0x05 }, |
1779 | { 0x04, 0x0e }, { 0x05, 0x00 }, |
1780 | - { 0x04, 0x0f }, { 0x05, 0x37 }, |
1781 | - { 0x04, 0x0b }, { 0x05, 0x78 }, |
1782 | + { 0x04, 0x0f }, { 0x05, 0x14 }, |
1783 | + { 0x04, 0x0b }, { 0x05, 0x8c }, |
1784 | { 0x04, 0x00 }, { 0x05, 0x00 }, |
1785 | - { 0x04, 0x01 }, { 0x05, 0x1e }, |
1786 | - { 0x04, 0x02 }, { 0x05, 0x07 }, |
1787 | - { 0x04, 0x03 }, { 0x05, 0xd0 }, |
1788 | + { 0x04, 0x01 }, { 0x05, 0x07 }, |
1789 | + { 0x04, 0x02 }, { 0x05, 0x0f }, |
1790 | + { 0x04, 0x03 }, { 0x05, 0xa0 }, |
1791 | { 0x04, 0x09 }, { 0x05, 0x00 }, |
1792 | { 0x04, 0x0a }, { 0x05, 0xff }, |
1793 | - { 0x04, 0x27 }, { 0x05, 0x00 }, |
1794 | + { 0x04, 0x27 }, { 0x05, 0x64 }, |
1795 | { 0x04, 0x28 }, { 0x05, 0x00 }, |
1796 | - { 0x04, 0x1e }, { 0x05, 0x00 }, |
1797 | - { 0x04, 0x29 }, { 0x05, 0x64 }, |
1798 | - { 0x04, 0x32 }, { 0x05, 0x02 }, |
1799 | + { 0x04, 0x1e }, { 0x05, 0xff }, |
1800 | + { 0x04, 0x29 }, { 0x05, 0x0a }, |
1801 | + { 0x04, 0x32 }, { 0x05, 0x0a }, |
1802 | { 0x04, 0x14 }, { 0x05, 0x02 }, |
1803 | { 0x04, 0x04 }, { 0x05, 0x00 }, |
1804 | { 0x04, 0x05 }, { 0x05, 0x22 }, |
1805 | @@ -97,8 +99,6 @@ static struct regdata mb86a20s_init2[] = { |
1806 | { 0x04, 0x07 }, { 0x05, 0xd8 }, |
1807 | { 0x04, 0x12 }, { 0x05, 0x00 }, |
1808 | { 0x04, 0x13 }, { 0x05, 0xff }, |
1809 | - { 0x04, 0x15 }, { 0x05, 0x4e }, |
1810 | - { 0x04, 0x16 }, { 0x05, 0x20 }, |
1811 | |
1812 | /* |
1813 | * On this demod, when the bit count reaches the count below, |
1814 | @@ -152,42 +152,36 @@ static struct regdata mb86a20s_init2[] = { |
1815 | { 0x50, 0x51 }, { 0x51, 0x04 }, /* MER symbol 4 */ |
1816 | { 0x45, 0x04 }, /* CN symbol 4 */ |
1817 | { 0x48, 0x04 }, /* CN manual mode */ |
1818 | - |
1819 | + { 0x50, 0xd5 }, { 0x51, 0x01 }, |
1820 | { 0x50, 0xd6 }, { 0x51, 0x1f }, |
1821 | { 0x50, 0xd2 }, { 0x51, 0x03 }, |
1822 | - { 0x50, 0xd7 }, { 0x51, 0xbf }, |
1823 | - { 0x28, 0x74 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xff }, |
1824 | - { 0x28, 0x46 }, { 0x29, 0x00 }, { 0x2a, 0x1a }, { 0x2b, 0x0c }, |
1825 | - |
1826 | - { 0x04, 0x40 }, { 0x05, 0x00 }, |
1827 | - { 0x28, 0x00 }, { 0x2b, 0x08 }, |
1828 | - { 0x28, 0x05 }, { 0x2b, 0x00 }, |
1829 | + { 0x50, 0xd7 }, { 0x51, 0x3f }, |
1830 | { 0x1c, 0x01 }, |
1831 | - { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x1f }, |
1832 | - { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x18 }, |
1833 | - { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x12 }, |
1834 | - { 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x30 }, |
1835 | - { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x37 }, |
1836 | - { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 }, |
1837 | - { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x09 }, |
1838 | - { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x06 }, |
1839 | - { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7b }, |
1840 | - { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x76 }, |
1841 | - { 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x7d }, |
1842 | - { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x08 }, |
1843 | - { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0b }, |
1844 | - { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 }, |
1845 | - { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf2 }, |
1846 | - { 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xf3 }, |
1847 | - { 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x05 }, |
1848 | - { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 }, |
1849 | - { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f }, |
1850 | - { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xef }, |
1851 | - { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xd8 }, |
1852 | - { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xf1 }, |
1853 | - { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x3d }, |
1854 | - { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x94 }, |
1855 | - { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0xba }, |
1856 | + { 0x28, 0x06 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x03 }, |
1857 | + { 0x28, 0x07 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0d }, |
1858 | + { 0x28, 0x08 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x02 }, |
1859 | + { 0x28, 0x09 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x01 }, |
1860 | + { 0x28, 0x0a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x21 }, |
1861 | + { 0x28, 0x0b }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x29 }, |
1862 | + { 0x28, 0x0c }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x16 }, |
1863 | + { 0x28, 0x0d }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x31 }, |
1864 | + { 0x28, 0x0e }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0e }, |
1865 | + { 0x28, 0x0f }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x4e }, |
1866 | + { 0x28, 0x10 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x46 }, |
1867 | + { 0x28, 0x11 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x0f }, |
1868 | + { 0x28, 0x12 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x56 }, |
1869 | + { 0x28, 0x13 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x35 }, |
1870 | + { 0x28, 0x14 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbe }, |
1871 | + { 0x28, 0x15 }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0x84 }, |
1872 | + { 0x28, 0x16 }, { 0x29, 0x00 }, { 0x2a, 0x03 }, { 0x2b, 0xee }, |
1873 | + { 0x28, 0x17 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x98 }, |
1874 | + { 0x28, 0x18 }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x9f }, |
1875 | + { 0x28, 0x19 }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0xb2 }, |
1876 | + { 0x28, 0x1a }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0xc2 }, |
1877 | + { 0x28, 0x1b }, { 0x29, 0x00 }, { 0x2a, 0x07 }, { 0x2b, 0x4a }, |
1878 | + { 0x28, 0x1c }, { 0x29, 0x00 }, { 0x2a, 0x01 }, { 0x2b, 0xbc }, |
1879 | + { 0x28, 0x1d }, { 0x29, 0x00 }, { 0x2a, 0x04 }, { 0x2b, 0xba }, |
1880 | + { 0x28, 0x1e }, { 0x29, 0x00 }, { 0x2a, 0x06 }, { 0x2b, 0x14 }, |
1881 | { 0x50, 0x1e }, { 0x51, 0x5d }, |
1882 | { 0x50, 0x22 }, { 0x51, 0x00 }, |
1883 | { 0x50, 0x23 }, { 0x51, 0xc8 }, |
1884 | @@ -196,9 +190,7 @@ static struct regdata mb86a20s_init2[] = { |
1885 | { 0x50, 0x26 }, { 0x51, 0x00 }, |
1886 | { 0x50, 0x27 }, { 0x51, 0xc3 }, |
1887 | { 0x50, 0x39 }, { 0x51, 0x02 }, |
1888 | - { 0xec, 0x0f }, |
1889 | - { 0xeb, 0x1f }, |
1890 | - { 0x28, 0x6a }, { 0x29, 0x00 }, { 0x2a, 0x00 }, { 0x2b, 0x00 }, |
1891 | + { 0x50, 0xd5 }, { 0x51, 0x01 }, |
1892 | { 0xd0, 0x00 }, |
1893 | }; |
1894 | |
1895 | @@ -318,7 +310,11 @@ static int mb86a20s_read_status(struct dvb_frontend *fe, enum fe_status *status) |
1896 | if (val >= 7) |
1897 | *status |= FE_HAS_SYNC; |
1898 | |
1899 | - if (val >= 8) /* Maybe 9? */ |
1900 | + /* |
1901 | + * Actually, on state S8, it starts receiving TS, but the TS |
1902 | + * output is only on normal state after the transition to S9. |
1903 | + */ |
1904 | + if (val >= 9) |
1905 | *status |= FE_HAS_LOCK; |
1906 | |
1907 | dev_dbg(&state->i2c->dev, "%s: Status = 0x%02x (state = %d)\n", |
1908 | @@ -2058,6 +2054,11 @@ static void mb86a20s_release(struct dvb_frontend *fe) |
1909 | kfree(state); |
1910 | } |
1911 | |
1912 | +static int mb86a20s_get_frontend_algo(struct dvb_frontend *fe) |
1913 | +{ |
1914 | + return DVBFE_ALGO_HW; |
1915 | +} |
1916 | + |
1917 | static struct dvb_frontend_ops mb86a20s_ops; |
1918 | |
1919 | struct dvb_frontend *mb86a20s_attach(const struct mb86a20s_config *config, |
1920 | @@ -2130,6 +2131,7 @@ static struct dvb_frontend_ops mb86a20s_ops = { |
1921 | .read_status = mb86a20s_read_status_and_stats, |
1922 | .read_signal_strength = mb86a20s_read_signal_strength_from_cache, |
1923 | .tune = mb86a20s_tune, |
1924 | + .get_frontend_algo = mb86a20s_get_frontend_algo, |
1925 | }; |
1926 | |
1927 | MODULE_DESCRIPTION("DVB Frontend module for Fujitsu mb86A20s hardware"); |
1928 | diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c |
1929 | index 491913778bcc..2f52d66b4dae 100644 |
1930 | --- a/drivers/media/usb/cx231xx/cx231xx-avcore.c |
1931 | +++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c |
1932 | @@ -1264,7 +1264,10 @@ int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev, |
1933 | dev->board.agc_analog_digital_select_gpio, |
1934 | analog_or_digital); |
1935 | |
1936 | - return status; |
1937 | + if (status < 0) |
1938 | + return status; |
1939 | + |
1940 | + return 0; |
1941 | } |
1942 | |
1943 | int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3) |
1944 | diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c |
1945 | index c63248a18823..72c246bfaa1c 100644 |
1946 | --- a/drivers/media/usb/cx231xx/cx231xx-cards.c |
1947 | +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c |
1948 | @@ -486,7 +486,7 @@ struct cx231xx_board cx231xx_boards[] = { |
1949 | .output_mode = OUT_MODE_VIP11, |
1950 | .demod_xfer_mode = 0, |
1951 | .ctl_pin_status_mask = 0xFFFFFFC4, |
1952 | - .agc_analog_digital_select_gpio = 0x00, /* According with PV cxPolaris.inf file */ |
1953 | + .agc_analog_digital_select_gpio = 0x1c, |
1954 | .tuner_sif_gpio = -1, |
1955 | .tuner_scl_gpio = -1, |
1956 | .tuner_sda_gpio = -1, |
1957 | diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c |
1958 | index 630f4fc5155f..ea9a99e41581 100644 |
1959 | --- a/drivers/media/usb/cx231xx/cx231xx-core.c |
1960 | +++ b/drivers/media/usb/cx231xx/cx231xx-core.c |
1961 | @@ -712,6 +712,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode) |
1962 | break; |
1963 | case CX231XX_BOARD_CNXT_RDE_253S: |
1964 | case CX231XX_BOARD_CNXT_RDU_253S: |
1965 | + case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: |
1966 | errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 1); |
1967 | break; |
1968 | case CX231XX_BOARD_HAUPPAUGE_EXETER: |
1969 | @@ -738,7 +739,7 @@ int cx231xx_set_mode(struct cx231xx *dev, enum cx231xx_mode set_mode) |
1970 | case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: |
1971 | case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL: |
1972 | case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC: |
1973 | - errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0); |
1974 | + errCode = cx231xx_set_agc_analog_digital_mux_select(dev, 0); |
1975 | break; |
1976 | default: |
1977 | break; |
1978 | @@ -1301,15 +1302,29 @@ int cx231xx_dev_init(struct cx231xx *dev) |
1979 | dev->i2c_bus[2].i2c_reserve = 0; |
1980 | |
1981 | /* register I2C buses */ |
1982 | - cx231xx_i2c_register(&dev->i2c_bus[0]); |
1983 | - cx231xx_i2c_register(&dev->i2c_bus[1]); |
1984 | - cx231xx_i2c_register(&dev->i2c_bus[2]); |
1985 | + errCode = cx231xx_i2c_register(&dev->i2c_bus[0]); |
1986 | + if (errCode < 0) |
1987 | + return errCode; |
1988 | + errCode = cx231xx_i2c_register(&dev->i2c_bus[1]); |
1989 | + if (errCode < 0) |
1990 | + return errCode; |
1991 | + errCode = cx231xx_i2c_register(&dev->i2c_bus[2]); |
1992 | + if (errCode < 0) |
1993 | + return errCode; |
1994 | |
1995 | errCode = cx231xx_i2c_mux_create(dev); |
1996 | + if (errCode < 0) { |
1997 | + dev_err(dev->dev, |
1998 | + "%s: Failed to create I2C mux\n", __func__); |
1999 | + return errCode; |
2000 | + } |
2001 | + errCode = cx231xx_i2c_mux_register(dev, 0); |
2002 | + if (errCode < 0) |
2003 | + return errCode; |
2004 | + |
2005 | + errCode = cx231xx_i2c_mux_register(dev, 1); |
2006 | if (errCode < 0) |
2007 | return errCode; |
2008 | - cx231xx_i2c_mux_register(dev, 0); |
2009 | - cx231xx_i2c_mux_register(dev, 1); |
2010 | |
2011 | /* scan the real bus segments in the order of physical port numbers */ |
2012 | cx231xx_do_i2c_scan(dev, I2C_0); |
2013 | diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c |
2014 | index d34bc3530385..2e3cf012ef48 100644 |
2015 | --- a/drivers/memstick/host/rtsx_usb_ms.c |
2016 | +++ b/drivers/memstick/host/rtsx_usb_ms.c |
2017 | @@ -524,6 +524,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work) |
2018 | int rc; |
2019 | |
2020 | if (!host->req) { |
2021 | + pm_runtime_get_sync(ms_dev(host)); |
2022 | do { |
2023 | rc = memstick_next_req(msh, &host->req); |
2024 | dev_dbg(ms_dev(host), "next req %d\n", rc); |
2025 | @@ -544,6 +545,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work) |
2026 | host->req->error); |
2027 | } |
2028 | } while (!rc); |
2029 | + pm_runtime_put(ms_dev(host)); |
2030 | } |
2031 | |
2032 | } |
2033 | @@ -570,6 +572,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh, |
2034 | dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n", |
2035 | __func__, param, value); |
2036 | |
2037 | + pm_runtime_get_sync(ms_dev(host)); |
2038 | mutex_lock(&ucr->dev_mutex); |
2039 | |
2040 | err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD); |
2041 | @@ -635,6 +638,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh, |
2042 | } |
2043 | out: |
2044 | mutex_unlock(&ucr->dev_mutex); |
2045 | + pm_runtime_put(ms_dev(host)); |
2046 | |
2047 | /* power-on delay */ |
2048 | if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON) |
2049 | @@ -681,6 +685,7 @@ static int rtsx_usb_detect_ms_card(void *__host) |
2050 | int err; |
2051 | |
2052 | for (;;) { |
2053 | + pm_runtime_get_sync(ms_dev(host)); |
2054 | mutex_lock(&ucr->dev_mutex); |
2055 | |
2056 | /* Check pending MS card changes */ |
2057 | @@ -703,6 +708,7 @@ static int rtsx_usb_detect_ms_card(void *__host) |
2058 | } |
2059 | |
2060 | poll_again: |
2061 | + pm_runtime_put(ms_dev(host)); |
2062 | if (host->eject) |
2063 | break; |
2064 | |
2065 | diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c |
2066 | index f3d34b941f85..af23d7dfe752 100644 |
2067 | --- a/drivers/misc/cxl/api.c |
2068 | +++ b/drivers/misc/cxl/api.c |
2069 | @@ -229,6 +229,14 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed, |
2070 | if (ctx->status == STARTED) |
2071 | goto out; /* already started */ |
2072 | |
2073 | + /* |
2074 | + * Increment the mapped context count for adapter. This also checks |
2075 | + * if adapter_context_lock is taken. |
2076 | + */ |
2077 | + rc = cxl_adapter_context_get(ctx->afu->adapter); |
2078 | + if (rc) |
2079 | + goto out; |
2080 | + |
2081 | if (task) { |
2082 | ctx->pid = get_task_pid(task, PIDTYPE_PID); |
2083 | ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID); |
2084 | @@ -240,6 +248,7 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed, |
2085 | |
2086 | if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) { |
2087 | put_pid(ctx->pid); |
2088 | + cxl_adapter_context_put(ctx->afu->adapter); |
2089 | cxl_ctx_put(); |
2090 | goto out; |
2091 | } |
2092 | diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c |
2093 | index c466ee2b0c97..5e506c19108a 100644 |
2094 | --- a/drivers/misc/cxl/context.c |
2095 | +++ b/drivers/misc/cxl/context.c |
2096 | @@ -238,6 +238,9 @@ int __detach_context(struct cxl_context *ctx) |
2097 | put_pid(ctx->glpid); |
2098 | |
2099 | cxl_ctx_put(); |
2100 | + |
2101 | + /* Decrease the attached context count on the adapter */ |
2102 | + cxl_adapter_context_put(ctx->afu->adapter); |
2103 | return 0; |
2104 | } |
2105 | |
2106 | diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h |
2107 | index 344a0ff8f8c7..19aa2aca9683 100644 |
2108 | --- a/drivers/misc/cxl/cxl.h |
2109 | +++ b/drivers/misc/cxl/cxl.h |
2110 | @@ -615,6 +615,14 @@ struct cxl { |
2111 | bool perst_select_user; |
2112 | bool perst_same_image; |
2113 | bool psl_timebase_synced; |
2114 | + |
2115 | + /* |
2116 | + * number of contexts mapped on to this card. Possible values are: |
2117 | + * >0: Number of contexts mapped and new one can be mapped. |
2118 | + * 0: No active contexts and new ones can be mapped. |
2119 | + * -1: No contexts mapped and new ones cannot be mapped. |
2120 | + */ |
2121 | + atomic_t contexts_num; |
2122 | }; |
2123 | |
2124 | int cxl_pci_alloc_one_irq(struct cxl *adapter); |
2125 | @@ -940,4 +948,20 @@ bool cxl_pci_is_vphb_device(struct pci_dev *dev); |
2126 | |
2127 | /* decode AFU error bits in the PSL register PSL_SERR_An */ |
2128 | void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr); |
2129 | + |
2130 | +/* |
2131 | + * Increments the number of attached contexts on an adapter. |
2132 | + * In case an adapter_context_lock is taken the return -EBUSY. |
2133 | + */ |
2134 | +int cxl_adapter_context_get(struct cxl *adapter); |
2135 | + |
2136 | +/* Decrements the number of attached contexts on an adapter */ |
2137 | +void cxl_adapter_context_put(struct cxl *adapter); |
2138 | + |
2139 | +/* If no active contexts then prevents contexts from being attached */ |
2140 | +int cxl_adapter_context_lock(struct cxl *adapter); |
2141 | + |
2142 | +/* Unlock the contexts-lock if taken. Warn and force unlock otherwise */ |
2143 | +void cxl_adapter_context_unlock(struct cxl *adapter); |
2144 | + |
2145 | #endif |
2146 | diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c |
2147 | index 5fb9894b157f..d0b421f49b39 100644 |
2148 | --- a/drivers/misc/cxl/file.c |
2149 | +++ b/drivers/misc/cxl/file.c |
2150 | @@ -205,11 +205,22 @@ static long afu_ioctl_start_work(struct cxl_context *ctx, |
2151 | ctx->pid = get_task_pid(current, PIDTYPE_PID); |
2152 | ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID); |
2153 | |
2154 | + /* |
2155 | + * Increment the mapped context count for adapter. This also checks |
2156 | + * if adapter_context_lock is taken. |
2157 | + */ |
2158 | + rc = cxl_adapter_context_get(ctx->afu->adapter); |
2159 | + if (rc) { |
2160 | + afu_release_irqs(ctx, ctx); |
2161 | + goto out; |
2162 | + } |
2163 | + |
2164 | trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr); |
2165 | |
2166 | if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor, |
2167 | amr))) { |
2168 | afu_release_irqs(ctx, ctx); |
2169 | + cxl_adapter_context_put(ctx->afu->adapter); |
2170 | goto out; |
2171 | } |
2172 | |
2173 | diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c |
2174 | index 9aa58a77a24d..3e102cd6ed91 100644 |
2175 | --- a/drivers/misc/cxl/guest.c |
2176 | +++ b/drivers/misc/cxl/guest.c |
2177 | @@ -1152,6 +1152,9 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic |
2178 | if ((rc = cxl_sysfs_adapter_add(adapter))) |
2179 | goto err_put1; |
2180 | |
2181 | + /* release the context lock as the adapter is configured */ |
2182 | + cxl_adapter_context_unlock(adapter); |
2183 | + |
2184 | return adapter; |
2185 | |
2186 | err_put1: |
2187 | diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c |
2188 | index d9be23b24aa3..62e0dfb5f15b 100644 |
2189 | --- a/drivers/misc/cxl/main.c |
2190 | +++ b/drivers/misc/cxl/main.c |
2191 | @@ -243,8 +243,10 @@ struct cxl *cxl_alloc_adapter(void) |
2192 | if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num)) |
2193 | goto err2; |
2194 | |
2195 | - return adapter; |
2196 | + /* start with context lock taken */ |
2197 | + atomic_set(&adapter->contexts_num, -1); |
2198 | |
2199 | + return adapter; |
2200 | err2: |
2201 | cxl_remove_adapter_nr(adapter); |
2202 | err1: |
2203 | @@ -286,6 +288,44 @@ int cxl_afu_select_best_mode(struct cxl_afu *afu) |
2204 | return 0; |
2205 | } |
2206 | |
2207 | +int cxl_adapter_context_get(struct cxl *adapter) |
2208 | +{ |
2209 | + int rc; |
2210 | + |
2211 | + rc = atomic_inc_unless_negative(&adapter->contexts_num); |
2212 | + return rc >= 0 ? 0 : -EBUSY; |
2213 | +} |
2214 | + |
2215 | +void cxl_adapter_context_put(struct cxl *adapter) |
2216 | +{ |
2217 | + atomic_dec_if_positive(&adapter->contexts_num); |
2218 | +} |
2219 | + |
2220 | +int cxl_adapter_context_lock(struct cxl *adapter) |
2221 | +{ |
2222 | + int rc; |
2223 | + /* no active contexts -> contexts_num == 0 */ |
2224 | + rc = atomic_cmpxchg(&adapter->contexts_num, 0, -1); |
2225 | + return rc ? -EBUSY : 0; |
2226 | +} |
2227 | + |
2228 | +void cxl_adapter_context_unlock(struct cxl *adapter) |
2229 | +{ |
2230 | + int val = atomic_cmpxchg(&adapter->contexts_num, -1, 0); |
2231 | + |
2232 | + /* |
2233 | + * contexts lock taken -> contexts_num == -1 |
2234 | + * If not true then show a warning and force reset the lock. |
2235 | + * This will happen when context_unlock was requested without |
2236 | + * doing a context_lock. |
2237 | + */ |
2238 | + if (val != -1) { |
2239 | + atomic_set(&adapter->contexts_num, 0); |
2240 | + WARN(1, "Adapter context unlocked with %d active contexts", |
2241 | + val); |
2242 | + } |
2243 | +} |
2244 | + |
2245 | static int __init init_cxl(void) |
2246 | { |
2247 | int rc = 0; |
2248 | diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c |
2249 | index 6f0c4ac4b649..8ad4e4f6ff77 100644 |
2250 | --- a/drivers/misc/cxl/pci.c |
2251 | +++ b/drivers/misc/cxl/pci.c |
2252 | @@ -1484,6 +1484,8 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev) |
2253 | if ((rc = cxl_native_register_psl_err_irq(adapter))) |
2254 | goto err; |
2255 | |
2256 | + /* Release the context lock as adapter is configured */ |
2257 | + cxl_adapter_context_unlock(adapter); |
2258 | return 0; |
2259 | |
2260 | err: |
2261 | diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c |
2262 | index b043c20f158f..a8b6d6a635e9 100644 |
2263 | --- a/drivers/misc/cxl/sysfs.c |
2264 | +++ b/drivers/misc/cxl/sysfs.c |
2265 | @@ -75,12 +75,31 @@ static ssize_t reset_adapter_store(struct device *device, |
2266 | int val; |
2267 | |
2268 | rc = sscanf(buf, "%i", &val); |
2269 | - if ((rc != 1) || (val != 1)) |
2270 | + if ((rc != 1) || (val != 1 && val != -1)) |
2271 | return -EINVAL; |
2272 | |
2273 | - if ((rc = cxl_ops->adapter_reset(adapter))) |
2274 | - return rc; |
2275 | - return count; |
2276 | + /* |
2277 | + * See if we can lock the context mapping that's only allowed |
2278 | + * when there are no contexts attached to the adapter. Once |
2279 | + * taken this will also prevent any context from getting activated. |
2280 | + */ |
2281 | + if (val == 1) { |
2282 | + rc = cxl_adapter_context_lock(adapter); |
2283 | + if (rc) |
2284 | + goto out; |
2285 | + |
2286 | + rc = cxl_ops->adapter_reset(adapter); |
2287 | + /* In case reset failed release context lock */ |
2288 | + if (rc) |
2289 | + cxl_adapter_context_unlock(adapter); |
2290 | + |
2291 | + } else if (val == -1) { |
2292 | + /* Perform a forced adapter reset */ |
2293 | + rc = cxl_ops->adapter_reset(adapter); |
2294 | + } |
2295 | + |
2296 | +out: |
2297 | + return rc ? rc : count; |
2298 | } |
2299 | |
2300 | static ssize_t load_image_on_perst_show(struct device *device, |
2301 | diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c |
2302 | index fd9271bc1a11..cd01e342bc78 100644 |
2303 | --- a/drivers/misc/mei/amthif.c |
2304 | +++ b/drivers/misc/mei/amthif.c |
2305 | @@ -139,7 +139,7 @@ int mei_amthif_read(struct mei_device *dev, struct file *file, |
2306 | return -ERESTARTSYS; |
2307 | |
2308 | if (!mei_cl_is_connected(cl)) { |
2309 | - rets = -EBUSY; |
2310 | + rets = -ENODEV; |
2311 | goto out; |
2312 | } |
2313 | |
2314 | diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c |
2315 | index e094df3cf2d5..5b5b2e07e99e 100644 |
2316 | --- a/drivers/misc/mei/bus.c |
2317 | +++ b/drivers/misc/mei/bus.c |
2318 | @@ -142,7 +142,7 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length) |
2319 | mutex_lock(&bus->device_lock); |
2320 | |
2321 | if (!mei_cl_is_connected(cl)) { |
2322 | - rets = -EBUSY; |
2323 | + rets = -ENODEV; |
2324 | goto out; |
2325 | } |
2326 | } |
2327 | diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h |
2328 | index 0dcb854b4bfc..7ad15d678878 100644 |
2329 | --- a/drivers/misc/mei/hw-me-regs.h |
2330 | +++ b/drivers/misc/mei/hw-me-regs.h |
2331 | @@ -125,6 +125,9 @@ |
2332 | #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ |
2333 | #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ |
2334 | |
2335 | +#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ |
2336 | +#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ |
2337 | + |
2338 | /* |
2339 | * MEI HW Section |
2340 | */ |
2341 | diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c |
2342 | index 52635b063873..080208dc5516 100644 |
2343 | --- a/drivers/misc/mei/main.c |
2344 | +++ b/drivers/misc/mei/main.c |
2345 | @@ -202,7 +202,7 @@ static ssize_t mei_read(struct file *file, char __user *ubuf, |
2346 | |
2347 | mutex_lock(&dev->device_lock); |
2348 | if (!mei_cl_is_connected(cl)) { |
2349 | - rets = -EBUSY; |
2350 | + rets = -ENODEV; |
2351 | goto out; |
2352 | } |
2353 | } |
2354 | diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c |
2355 | index 71cea9b296b2..5eb9b75ae9ec 100644 |
2356 | --- a/drivers/misc/mei/pci-me.c |
2357 | +++ b/drivers/misc/mei/pci-me.c |
2358 | @@ -91,6 +91,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = { |
2359 | {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)}, |
2360 | {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)}, |
2361 | |
2362 | + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, mei_me_pch8_cfg)}, |
2363 | + {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, mei_me_pch8_cfg)}, |
2364 | + |
2365 | /* required last entry */ |
2366 | {0, } |
2367 | }; |
2368 | diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c |
2369 | index 2206d4477dbb..17891f17f39d 100644 |
2370 | --- a/drivers/mmc/card/block.c |
2371 | +++ b/drivers/mmc/card/block.c |
2372 | @@ -1778,7 +1778,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, |
2373 | struct mmc_blk_data *md = mq->data; |
2374 | struct mmc_packed *packed = mqrq->packed; |
2375 | bool do_rel_wr, do_data_tag; |
2376 | - u32 *packed_cmd_hdr; |
2377 | + __le32 *packed_cmd_hdr; |
2378 | u8 hdr_blocks; |
2379 | u8 i = 1; |
2380 | |
2381 | @@ -2303,7 +2303,8 @@ again: |
2382 | set_capacity(md->disk, size); |
2383 | |
2384 | if (mmc_host_cmd23(card->host)) { |
2385 | - if (mmc_card_mmc(card) || |
2386 | + if ((mmc_card_mmc(card) && |
2387 | + card->csd.mmca_vsn >= CSD_SPEC_VER_3) || |
2388 | (mmc_card_sd(card) && |
2389 | card->scr.cmds & SD_SCR_CMD23_SUPPORT)) |
2390 | md->flags |= MMC_BLK_CMD23; |
2391 | diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h |
2392 | index fee5e1271465..7f16709a5bd5 100644 |
2393 | --- a/drivers/mmc/card/queue.h |
2394 | +++ b/drivers/mmc/card/queue.h |
2395 | @@ -31,7 +31,7 @@ enum mmc_packed_type { |
2396 | |
2397 | struct mmc_packed { |
2398 | struct list_head list; |
2399 | - u32 cmd_hdr[1024]; |
2400 | + __le32 cmd_hdr[1024]; |
2401 | unsigned int blocks; |
2402 | u8 nr_entries; |
2403 | u8 retries; |
2404 | diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c |
2405 | index f2d185cf8a8b..c57eb32dc075 100644 |
2406 | --- a/drivers/mmc/core/mmc.c |
2407 | +++ b/drivers/mmc/core/mmc.c |
2408 | @@ -1259,6 +1259,16 @@ static int mmc_select_hs400es(struct mmc_card *card) |
2409 | goto out_err; |
2410 | } |
2411 | |
2412 | + if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V) |
2413 | + err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120); |
2414 | + |
2415 | + if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V) |
2416 | + err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180); |
2417 | + |
2418 | + /* If fails try again during next card power cycle */ |
2419 | + if (err) |
2420 | + goto out_err; |
2421 | + |
2422 | err = mmc_select_bus_width(card); |
2423 | if (err < 0) |
2424 | goto out_err; |
2425 | diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c |
2426 | index 6c71fc9f76c7..da9f71b8deb0 100644 |
2427 | --- a/drivers/mmc/host/rtsx_usb_sdmmc.c |
2428 | +++ b/drivers/mmc/host/rtsx_usb_sdmmc.c |
2429 | @@ -1138,11 +1138,6 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
2430 | dev_dbg(sdmmc_dev(host), "%s\n", __func__); |
2431 | mutex_lock(&ucr->dev_mutex); |
2432 | |
2433 | - if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD)) { |
2434 | - mutex_unlock(&ucr->dev_mutex); |
2435 | - return; |
2436 | - } |
2437 | - |
2438 | sd_set_power_mode(host, ios->power_mode); |
2439 | sd_set_bus_width(host, ios->bus_width); |
2440 | sd_set_timing(host, ios->timing, &host->ddr_mode); |
2441 | @@ -1314,6 +1309,7 @@ static void rtsx_usb_update_led(struct work_struct *work) |
2442 | container_of(work, struct rtsx_usb_sdmmc, led_work); |
2443 | struct rtsx_ucr *ucr = host->ucr; |
2444 | |
2445 | + pm_runtime_get_sync(sdmmc_dev(host)); |
2446 | mutex_lock(&ucr->dev_mutex); |
2447 | |
2448 | if (host->led.brightness == LED_OFF) |
2449 | @@ -1322,6 +1318,7 @@ static void rtsx_usb_update_led(struct work_struct *work) |
2450 | rtsx_usb_turn_on_led(ucr); |
2451 | |
2452 | mutex_unlock(&ucr->dev_mutex); |
2453 | + pm_runtime_put(sdmmc_dev(host)); |
2454 | } |
2455 | #endif |
2456 | |
2457 | diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c |
2458 | index cd65d474afa2..a8a022a7358f 100644 |
2459 | --- a/drivers/mmc/host/sdhci.c |
2460 | +++ b/drivers/mmc/host/sdhci.c |
2461 | @@ -687,7 +687,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) |
2462 | * host->clock is in Hz. target_timeout is in us. |
2463 | * Hence, us = 1000000 * cycles / Hz. Round up. |
2464 | */ |
2465 | - val = 1000000 * data->timeout_clks; |
2466 | + val = 1000000ULL * data->timeout_clks; |
2467 | if (do_div(val, host->clock)) |
2468 | target_timeout++; |
2469 | target_timeout += val; |
2470 | diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c |
2471 | index f4533266d7b2..b419c7cfd014 100644 |
2472 | --- a/drivers/mtd/ubi/wl.c |
2473 | +++ b/drivers/mtd/ubi/wl.c |
2474 | @@ -644,7 +644,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, |
2475 | int shutdown) |
2476 | { |
2477 | int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; |
2478 | - int vol_id = -1, lnum = -1; |
2479 | + int erase = 0, keep = 0, vol_id = -1, lnum = -1; |
2480 | #ifdef CONFIG_MTD_UBI_FASTMAP |
2481 | int anchor = wrk->anchor; |
2482 | #endif |
2483 | @@ -780,6 +780,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, |
2484 | e1->pnum); |
2485 | scrubbing = 1; |
2486 | goto out_not_moved; |
2487 | + } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) { |
2488 | + /* |
2489 | + * While a full scan would detect interrupted erasures |
2490 | + * at attach time we can face them here when attached from |
2491 | + * Fastmap. |
2492 | + */ |
2493 | + dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure", |
2494 | + e1->pnum); |
2495 | + erase = 1; |
2496 | + goto out_not_moved; |
2497 | } |
2498 | |
2499 | ubi_err(ubi, "error %d while reading VID header from PEB %d", |
2500 | @@ -815,6 +825,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, |
2501 | * Target PEB had bit-flips or write error - torture it. |
2502 | */ |
2503 | torture = 1; |
2504 | + keep = 1; |
2505 | goto out_not_moved; |
2506 | } |
2507 | |
2508 | @@ -901,7 +912,7 @@ out_not_moved: |
2509 | ubi->erroneous_peb_count += 1; |
2510 | } else if (scrubbing) |
2511 | wl_tree_add(e1, &ubi->scrub); |
2512 | - else |
2513 | + else if (keep) |
2514 | wl_tree_add(e1, &ubi->used); |
2515 | if (dst_leb_clean) { |
2516 | wl_tree_add(e2, &ubi->free); |
2517 | @@ -922,6 +933,12 @@ out_not_moved: |
2518 | goto out_ro; |
2519 | } |
2520 | |
2521 | + if (erase) { |
2522 | + err = do_sync_erase(ubi, e1, vol_id, lnum, 1); |
2523 | + if (err) |
2524 | + goto out_ro; |
2525 | + } |
2526 | + |
2527 | mutex_unlock(&ubi->move_mutex); |
2528 | return 0; |
2529 | |
2530 | diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c |
2531 | index 9fb8d7472d18..da9998ea9271 100644 |
2532 | --- a/drivers/net/wireless/ath/ath10k/ce.c |
2533 | +++ b/drivers/net/wireless/ath/ath10k/ce.c |
2534 | @@ -433,6 +433,13 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries) |
2535 | unsigned int nentries_mask = dest_ring->nentries_mask; |
2536 | unsigned int write_index = dest_ring->write_index; |
2537 | u32 ctrl_addr = pipe->ctrl_addr; |
2538 | + u32 cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr); |
2539 | + |
2540 | + /* Prevent CE ring stuck issue that will occur when ring is full. |
2541 | + * Make sure that write index is 1 less than read index. |
2542 | + */ |
2543 | + if ((cur_write_idx + nentries) == dest_ring->sw_index) |
2544 | + nentries -= 1; |
2545 | |
2546 | write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries); |
2547 | ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index); |
2548 | diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c |
2549 | index 3524441fd516..6ee6bf8e7eaf 100644 |
2550 | --- a/drivers/net/wireless/realtek/rtlwifi/regd.c |
2551 | +++ b/drivers/net/wireless/realtek/rtlwifi/regd.c |
2552 | @@ -345,9 +345,9 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select( |
2553 | return &rtl_regdom_no_midband; |
2554 | case COUNTRY_CODE_IC: |
2555 | return &rtl_regdom_11; |
2556 | - case COUNTRY_CODE_ETSI: |
2557 | case COUNTRY_CODE_TELEC_NETGEAR: |
2558 | return &rtl_regdom_60_64; |
2559 | + case COUNTRY_CODE_ETSI: |
2560 | case COUNTRY_CODE_SPAIN: |
2561 | case COUNTRY_CODE_FRANCE: |
2562 | case COUNTRY_CODE_ISRAEL: |
2563 | @@ -406,6 +406,8 @@ static u8 channel_plan_to_country_code(u8 channelplan) |
2564 | return COUNTRY_CODE_WORLD_WIDE_13; |
2565 | case 0x22: |
2566 | return COUNTRY_CODE_IC; |
2567 | + case 0x25: |
2568 | + return COUNTRY_CODE_ETSI; |
2569 | case 0x32: |
2570 | return COUNTRY_CODE_TELEC_NETGEAR; |
2571 | case 0x41: |
2572 | diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c |
2573 | index 6de0757b11e4..84d650d892e7 100644 |
2574 | --- a/drivers/pci/host/pci-tegra.c |
2575 | +++ b/drivers/pci/host/pci-tegra.c |
2576 | @@ -856,7 +856,7 @@ static int tegra_pcie_phy_disable(struct tegra_pcie *pcie) |
2577 | /* override IDDQ */ |
2578 | value = pads_readl(pcie, PADS_CTL); |
2579 | value |= PADS_CTL_IDDQ_1L; |
2580 | - pads_writel(pcie, PADS_CTL, value); |
2581 | + pads_writel(pcie, value, PADS_CTL); |
2582 | |
2583 | /* reset PLL */ |
2584 | value = pads_readl(pcie, soc->pads_pll_ctl); |
2585 | diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c |
2586 | index 44e0ff37480b..4bf1a88d7ba7 100644 |
2587 | --- a/drivers/pci/quirks.c |
2588 | +++ b/drivers/pci/quirks.c |
2589 | @@ -3198,6 +3198,7 @@ static void quirk_no_bus_reset(struct pci_dev *dev) |
2590 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); |
2591 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset); |
2592 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset); |
2593 | +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset); |
2594 | |
2595 | static void quirk_no_pm_reset(struct pci_dev *dev) |
2596 | { |
2597 | diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c |
2598 | index d22a9fe2e6df..71bbeb9321ba 100644 |
2599 | --- a/drivers/pinctrl/intel/pinctrl-baytrail.c |
2600 | +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c |
2601 | @@ -1808,6 +1808,8 @@ static int byt_pinctrl_probe(struct platform_device *pdev) |
2602 | return PTR_ERR(vg->pctl_dev); |
2603 | } |
2604 | |
2605 | + raw_spin_lock_init(&vg->lock); |
2606 | + |
2607 | ret = byt_gpio_probe(vg); |
2608 | if (ret) { |
2609 | pinctrl_unregister(vg->pctl_dev); |
2610 | @@ -1815,7 +1817,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev) |
2611 | } |
2612 | |
2613 | platform_set_drvdata(pdev, vg); |
2614 | - raw_spin_lock_init(&vg->lock); |
2615 | pm_runtime_enable(&pdev->dev); |
2616 | |
2617 | return 0; |
2618 | diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c |
2619 | index 257cab129692..2b5b20bf7d99 100644 |
2620 | --- a/drivers/pinctrl/intel/pinctrl-intel.c |
2621 | +++ b/drivers/pinctrl/intel/pinctrl-intel.c |
2622 | @@ -19,6 +19,7 @@ |
2623 | #include <linux/pinctrl/pinconf.h> |
2624 | #include <linux/pinctrl/pinconf-generic.h> |
2625 | |
2626 | +#include "../core.h" |
2627 | #include "pinctrl-intel.h" |
2628 | |
2629 | /* Offset from regs */ |
2630 | @@ -1079,6 +1080,26 @@ int intel_pinctrl_remove(struct platform_device *pdev) |
2631 | EXPORT_SYMBOL_GPL(intel_pinctrl_remove); |
2632 | |
2633 | #ifdef CONFIG_PM_SLEEP |
2634 | +static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned pin) |
2635 | +{ |
2636 | + const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin); |
2637 | + |
2638 | + if (!pd || !intel_pad_usable(pctrl, pin)) |
2639 | + return false; |
2640 | + |
2641 | + /* |
2642 | + * Only restore the pin if it is actually in use by the kernel (or |
2643 | + * by userspace). It is possible that some pins are used by the |
2644 | + * BIOS during resume and those are not always locked down so leave |
2645 | + * them alone. |
2646 | + */ |
2647 | + if (pd->mux_owner || pd->gpio_owner || |
2648 | + gpiochip_line_is_irq(&pctrl->chip, pin)) |
2649 | + return true; |
2650 | + |
2651 | + return false; |
2652 | +} |
2653 | + |
2654 | int intel_pinctrl_suspend(struct device *dev) |
2655 | { |
2656 | struct platform_device *pdev = to_platform_device(dev); |
2657 | @@ -1092,7 +1113,7 @@ int intel_pinctrl_suspend(struct device *dev) |
2658 | const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i]; |
2659 | u32 val; |
2660 | |
2661 | - if (!intel_pad_usable(pctrl, desc->number)) |
2662 | + if (!intel_pinctrl_should_save(pctrl, desc->number)) |
2663 | continue; |
2664 | |
2665 | val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG0)); |
2666 | @@ -1153,7 +1174,7 @@ int intel_pinctrl_resume(struct device *dev) |
2667 | void __iomem *padcfg; |
2668 | u32 val; |
2669 | |
2670 | - if (!intel_pad_usable(pctrl, desc->number)) |
2671 | + if (!intel_pinctrl_should_save(pctrl, desc->number)) |
2672 | continue; |
2673 | |
2674 | padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0); |
2675 | diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c |
2676 | index fb991ec76423..696116ebdf50 100644 |
2677 | --- a/drivers/regulator/tps65910-regulator.c |
2678 | +++ b/drivers/regulator/tps65910-regulator.c |
2679 | @@ -1111,6 +1111,12 @@ static int tps65910_probe(struct platform_device *pdev) |
2680 | pmic->num_regulators = ARRAY_SIZE(tps65910_regs); |
2681 | pmic->ext_sleep_control = tps65910_ext_sleep_control; |
2682 | info = tps65910_regs; |
2683 | + /* Work around silicon erratum SWCZ010: output programmed |
2684 | + * voltage level can go higher than expected or crash |
2685 | + * Workaround: use no synchronization of DCDC clocks |
2686 | + */ |
2687 | + tps65910_reg_clear_bits(pmic->mfd, TPS65910_DCDCCTRL, |
2688 | + DCDCCTRL_DCDCCKSYNC_MASK); |
2689 | break; |
2690 | case TPS65911: |
2691 | pmic->get_ctrl_reg = &tps65911_get_ctrl_register; |
2692 | diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c |
2693 | index 5d7fbe4e907e..581001989937 100644 |
2694 | --- a/drivers/s390/scsi/zfcp_dbf.c |
2695 | +++ b/drivers/s390/scsi/zfcp_dbf.c |
2696 | @@ -3,7 +3,7 @@ |
2697 | * |
2698 | * Debug traces for zfcp. |
2699 | * |
2700 | - * Copyright IBM Corp. 2002, 2013 |
2701 | + * Copyright IBM Corp. 2002, 2016 |
2702 | */ |
2703 | |
2704 | #define KMSG_COMPONENT "zfcp" |
2705 | @@ -65,7 +65,7 @@ void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area, |
2706 | * @tag: tag indicating which kind of unsolicited status has been received |
2707 | * @req: request for which a response was received |
2708 | */ |
2709 | -void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req) |
2710 | +void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req) |
2711 | { |
2712 | struct zfcp_dbf *dbf = req->adapter->dbf; |
2713 | struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix; |
2714 | @@ -85,6 +85,8 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req) |
2715 | rec->u.res.req_issued = req->issued; |
2716 | rec->u.res.prot_status = q_pref->prot_status; |
2717 | rec->u.res.fsf_status = q_head->fsf_status; |
2718 | + rec->u.res.port_handle = q_head->port_handle; |
2719 | + rec->u.res.lun_handle = q_head->lun_handle; |
2720 | |
2721 | memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual, |
2722 | FSF_PROT_STATUS_QUAL_SIZE); |
2723 | @@ -97,7 +99,7 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req) |
2724 | rec->pl_len, "fsf_res", req->req_id); |
2725 | } |
2726 | |
2727 | - debug_event(dbf->hba, 1, rec, sizeof(*rec)); |
2728 | + debug_event(dbf->hba, level, rec, sizeof(*rec)); |
2729 | spin_unlock_irqrestore(&dbf->hba_lock, flags); |
2730 | } |
2731 | |
2732 | @@ -241,7 +243,8 @@ static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec, |
2733 | if (sdev) { |
2734 | rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status); |
2735 | rec->lun = zfcp_scsi_dev_lun(sdev); |
2736 | - } |
2737 | + } else |
2738 | + rec->lun = ZFCP_DBF_INVALID_LUN; |
2739 | } |
2740 | |
2741 | /** |
2742 | @@ -320,13 +323,48 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp) |
2743 | spin_unlock_irqrestore(&dbf->rec_lock, flags); |
2744 | } |
2745 | |
2746 | +/** |
2747 | + * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery |
2748 | + * @tag: identifier for event |
2749 | + * @wka_port: well known address port |
2750 | + * @req_id: request ID to correlate with potential HBA trace record |
2751 | + */ |
2752 | +void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port, |
2753 | + u64 req_id) |
2754 | +{ |
2755 | + struct zfcp_dbf *dbf = wka_port->adapter->dbf; |
2756 | + struct zfcp_dbf_rec *rec = &dbf->rec_buf; |
2757 | + unsigned long flags; |
2758 | + |
2759 | + spin_lock_irqsave(&dbf->rec_lock, flags); |
2760 | + memset(rec, 0, sizeof(*rec)); |
2761 | + |
2762 | + rec->id = ZFCP_DBF_REC_RUN; |
2763 | + memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); |
2764 | + rec->port_status = wka_port->status; |
2765 | + rec->d_id = wka_port->d_id; |
2766 | + rec->lun = ZFCP_DBF_INVALID_LUN; |
2767 | + |
2768 | + rec->u.run.fsf_req_id = req_id; |
2769 | + rec->u.run.rec_status = ~0; |
2770 | + rec->u.run.rec_step = ~0; |
2771 | + rec->u.run.rec_action = ~0; |
2772 | + rec->u.run.rec_count = ~0; |
2773 | + |
2774 | + debug_event(dbf->rec, 1, rec, sizeof(*rec)); |
2775 | + spin_unlock_irqrestore(&dbf->rec_lock, flags); |
2776 | +} |
2777 | + |
2778 | static inline |
2779 | -void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len, |
2780 | - u64 req_id, u32 d_id) |
2781 | +void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, |
2782 | + char *paytag, struct scatterlist *sg, u8 id, u16 len, |
2783 | + u64 req_id, u32 d_id, u16 cap_len) |
2784 | { |
2785 | struct zfcp_dbf_san *rec = &dbf->san_buf; |
2786 | u16 rec_len; |
2787 | unsigned long flags; |
2788 | + struct zfcp_dbf_pay *payload = &dbf->pay_buf; |
2789 | + u16 pay_sum = 0; |
2790 | |
2791 | spin_lock_irqsave(&dbf->san_lock, flags); |
2792 | memset(rec, 0, sizeof(*rec)); |
2793 | @@ -334,10 +372,41 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len, |
2794 | rec->id = id; |
2795 | rec->fsf_req_id = req_id; |
2796 | rec->d_id = d_id; |
2797 | - rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD); |
2798 | - memcpy(rec->payload, data, rec_len); |
2799 | memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); |
2800 | + rec->pl_len = len; /* full length even if we cap pay below */ |
2801 | + if (!sg) |
2802 | + goto out; |
2803 | + rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD); |
2804 | + memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */ |
2805 | + if (len <= rec_len) |
2806 | + goto out; /* skip pay record if full content in rec->payload */ |
2807 | + |
2808 | + /* if (len > rec_len): |
2809 | + * dump data up to cap_len ignoring small duplicate in rec->payload |
2810 | + */ |
2811 | + spin_lock(&dbf->pay_lock); |
2812 | + memset(payload, 0, sizeof(*payload)); |
2813 | + memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN); |
2814 | + payload->fsf_req_id = req_id; |
2815 | + payload->counter = 0; |
2816 | + for (; sg && pay_sum < cap_len; sg = sg_next(sg)) { |
2817 | + u16 pay_len, offset = 0; |
2818 | + |
2819 | + while (offset < sg->length && pay_sum < cap_len) { |
2820 | + pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC, |
2821 | + (u16)(sg->length - offset)); |
2822 | + /* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */ |
2823 | + memcpy(payload->data, sg_virt(sg) + offset, pay_len); |
2824 | + debug_event(dbf->pay, 1, payload, |
2825 | + zfcp_dbf_plen(pay_len)); |
2826 | + payload->counter++; |
2827 | + offset += pay_len; |
2828 | + pay_sum += pay_len; |
2829 | + } |
2830 | + } |
2831 | + spin_unlock(&dbf->pay_lock); |
2832 | |
2833 | +out: |
2834 | debug_event(dbf->san, 1, rec, sizeof(*rec)); |
2835 | spin_unlock_irqrestore(&dbf->san_lock, flags); |
2836 | } |
2837 | @@ -354,9 +423,62 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id) |
2838 | struct zfcp_fsf_ct_els *ct_els = fsf->data; |
2839 | u16 length; |
2840 | |
2841 | - length = (u16)(ct_els->req->length + FC_CT_HDR_LEN); |
2842 | - zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length, |
2843 | - fsf->req_id, d_id); |
2844 | + length = (u16)zfcp_qdio_real_bytes(ct_els->req); |
2845 | + zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ, |
2846 | + length, fsf->req_id, d_id, length); |
2847 | +} |
2848 | + |
2849 | +static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag, |
2850 | + struct zfcp_fsf_req *fsf, |
2851 | + u16 len) |
2852 | +{ |
2853 | + struct zfcp_fsf_ct_els *ct_els = fsf->data; |
2854 | + struct fc_ct_hdr *reqh = sg_virt(ct_els->req); |
2855 | + struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1); |
2856 | + struct scatterlist *resp_entry = ct_els->resp; |
2857 | + struct fc_gpn_ft_resp *acc; |
2858 | + int max_entries, x, last = 0; |
2859 | + |
2860 | + if (!(memcmp(tag, "fsscth2", 7) == 0 |
2861 | + && ct_els->d_id == FC_FID_DIR_SERV |
2862 | + && reqh->ct_rev == FC_CT_REV |
2863 | + && reqh->ct_in_id[0] == 0 |
2864 | + && reqh->ct_in_id[1] == 0 |
2865 | + && reqh->ct_in_id[2] == 0 |
2866 | + && reqh->ct_fs_type == FC_FST_DIR |
2867 | + && reqh->ct_fs_subtype == FC_NS_SUBTYPE |
2868 | + && reqh->ct_options == 0 |
2869 | + && reqh->_ct_resvd1 == 0 |
2870 | + && reqh->ct_cmd == FC_NS_GPN_FT |
2871 | + /* reqh->ct_mr_size can vary so do not match but read below */ |
2872 | + && reqh->_ct_resvd2 == 0 |
2873 | + && reqh->ct_reason == 0 |
2874 | + && reqh->ct_explan == 0 |
2875 | + && reqh->ct_vendor == 0 |
2876 | + && reqn->fn_resvd == 0 |
2877 | + && reqn->fn_domain_id_scope == 0 |
2878 | + && reqn->fn_area_id_scope == 0 |
2879 | + && reqn->fn_fc4_type == FC_TYPE_FCP)) |
2880 | + return len; /* not GPN_FT response so do not cap */ |
2881 | + |
2882 | + acc = sg_virt(resp_entry); |
2883 | + max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp)) |
2884 | + + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one |
2885 | + * to account for header as 1st pseudo "entry" */; |
2886 | + |
2887 | + /* the basic CT_IU preamble is the same size as one entry in the GPN_FT |
2888 | + * response, allowing us to skip special handling for it - just skip it |
2889 | + */ |
2890 | + for (x = 1; x < max_entries && !last; x++) { |
2891 | + if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1)) |
2892 | + acc++; |
2893 | + else |
2894 | + acc = sg_virt(++resp_entry); |
2895 | + |
2896 | + last = acc->fp_flags & FC_NS_FID_LAST; |
2897 | + } |
2898 | + len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp))); |
2899 | + return len; /* cap after last entry */ |
2900 | } |
2901 | |
2902 | /** |
2903 | @@ -370,9 +492,10 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf) |
2904 | struct zfcp_fsf_ct_els *ct_els = fsf->data; |
2905 | u16 length; |
2906 | |
2907 | - length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN); |
2908 | - zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length, |
2909 | - fsf->req_id, 0); |
2910 | + length = (u16)zfcp_qdio_real_bytes(ct_els->resp); |
2911 | + zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES, |
2912 | + length, fsf->req_id, ct_els->d_id, |
2913 | + zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length)); |
2914 | } |
2915 | |
2916 | /** |
2917 | @@ -386,11 +509,13 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf) |
2918 | struct fsf_status_read_buffer *srb = |
2919 | (struct fsf_status_read_buffer *) fsf->data; |
2920 | u16 length; |
2921 | + struct scatterlist sg; |
2922 | |
2923 | length = (u16)(srb->length - |
2924 | offsetof(struct fsf_status_read_buffer, payload)); |
2925 | - zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length, |
2926 | - fsf->req_id, ntoh24(srb->d_id)); |
2927 | + sg_init_one(&sg, srb->payload.data, length); |
2928 | + zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length, |
2929 | + fsf->req_id, ntoh24(srb->d_id), length); |
2930 | } |
2931 | |
2932 | /** |
2933 | @@ -399,7 +524,8 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf) |
2934 | * @sc: pointer to struct scsi_cmnd |
2935 | * @fsf: pointer to struct zfcp_fsf_req |
2936 | */ |
2937 | -void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf) |
2938 | +void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc, |
2939 | + struct zfcp_fsf_req *fsf) |
2940 | { |
2941 | struct zfcp_adapter *adapter = |
2942 | (struct zfcp_adapter *) sc->device->host->hostdata[0]; |
2943 | @@ -442,7 +568,7 @@ void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf) |
2944 | } |
2945 | } |
2946 | |
2947 | - debug_event(dbf->scsi, 1, rec, sizeof(*rec)); |
2948 | + debug_event(dbf->scsi, level, rec, sizeof(*rec)); |
2949 | spin_unlock_irqrestore(&dbf->scsi_lock, flags); |
2950 | } |
2951 | |
2952 | diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h |
2953 | index 0be3d48681ae..36d07584271d 100644 |
2954 | --- a/drivers/s390/scsi/zfcp_dbf.h |
2955 | +++ b/drivers/s390/scsi/zfcp_dbf.h |
2956 | @@ -2,7 +2,7 @@ |
2957 | * zfcp device driver |
2958 | * debug feature declarations |
2959 | * |
2960 | - * Copyright IBM Corp. 2008, 2010 |
2961 | + * Copyright IBM Corp. 2008, 2015 |
2962 | */ |
2963 | |
2964 | #ifndef ZFCP_DBF_H |
2965 | @@ -17,6 +17,11 @@ |
2966 | |
2967 | #define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull |
2968 | |
2969 | +enum zfcp_dbf_pseudo_erp_act_type { |
2970 | + ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD = 0xff, |
2971 | + ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL = 0xfe, |
2972 | +}; |
2973 | + |
2974 | /** |
2975 | * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action |
2976 | * @ready: number of ready recovery actions |
2977 | @@ -110,6 +115,7 @@ struct zfcp_dbf_san { |
2978 | u32 d_id; |
2979 | #define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32) |
2980 | char payload[ZFCP_DBF_SAN_MAX_PAYLOAD]; |
2981 | + u16 pl_len; |
2982 | } __packed; |
2983 | |
2984 | /** |
2985 | @@ -126,6 +132,8 @@ struct zfcp_dbf_hba_res { |
2986 | u8 prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE]; |
2987 | u32 fsf_status; |
2988 | u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE]; |
2989 | + u32 port_handle; |
2990 | + u32 lun_handle; |
2991 | } __packed; |
2992 | |
2993 | /** |
2994 | @@ -279,7 +287,7 @@ static inline |
2995 | void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req) |
2996 | { |
2997 | if (debug_level_enabled(req->adapter->dbf->hba, level)) |
2998 | - zfcp_dbf_hba_fsf_res(tag, req); |
2999 | + zfcp_dbf_hba_fsf_res(tag, level, req); |
3000 | } |
3001 | |
3002 | /** |
3003 | @@ -318,7 +326,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd, |
3004 | scmd->device->host->hostdata[0]; |
3005 | |
3006 | if (debug_level_enabled(adapter->dbf->scsi, level)) |
3007 | - zfcp_dbf_scsi(tag, scmd, req); |
3008 | + zfcp_dbf_scsi(tag, level, scmd, req); |
3009 | } |
3010 | |
3011 | /** |
3012 | diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c |
3013 | index 3fb410977014..a59d678125bd 100644 |
3014 | --- a/drivers/s390/scsi/zfcp_erp.c |
3015 | +++ b/drivers/s390/scsi/zfcp_erp.c |
3016 | @@ -3,7 +3,7 @@ |
3017 | * |
3018 | * Error Recovery Procedures (ERP). |
3019 | * |
3020 | - * Copyright IBM Corp. 2002, 2010 |
3021 | + * Copyright IBM Corp. 2002, 2015 |
3022 | */ |
3023 | |
3024 | #define KMSG_COMPONENT "zfcp" |
3025 | @@ -1217,8 +1217,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) |
3026 | break; |
3027 | |
3028 | case ZFCP_ERP_ACTION_REOPEN_PORT: |
3029 | - if (result == ZFCP_ERP_SUCCEEDED) |
3030 | - zfcp_scsi_schedule_rport_register(port); |
3031 | + /* This switch case might also happen after a forced reopen |
3032 | + * was successfully done and thus overwritten with a new |
3033 | + * non-forced reopen at `ersfs_2'. In this case, we must not |
3034 | + * do the clean-up of the non-forced version. |
3035 | + */ |
3036 | + if (act->step != ZFCP_ERP_STEP_UNINITIALIZED) |
3037 | + if (result == ZFCP_ERP_SUCCEEDED) |
3038 | + zfcp_scsi_schedule_rport_register(port); |
3039 | /* fall through */ |
3040 | case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: |
3041 | put_device(&port->dev); |
3042 | diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h |
3043 | index 5b500652572b..c8fed9fa1cca 100644 |
3044 | --- a/drivers/s390/scsi/zfcp_ext.h |
3045 | +++ b/drivers/s390/scsi/zfcp_ext.h |
3046 | @@ -3,7 +3,7 @@ |
3047 | * |
3048 | * External function declarations. |
3049 | * |
3050 | - * Copyright IBM Corp. 2002, 2010 |
3051 | + * Copyright IBM Corp. 2002, 2015 |
3052 | */ |
3053 | |
3054 | #ifndef ZFCP_EXT_H |
3055 | @@ -35,8 +35,9 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *); |
3056 | extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *, |
3057 | struct zfcp_port *, struct scsi_device *, u8, u8); |
3058 | extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *); |
3059 | +extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64); |
3060 | extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *); |
3061 | -extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *); |
3062 | +extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *); |
3063 | extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *); |
3064 | extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); |
3065 | extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **); |
3066 | @@ -44,7 +45,8 @@ extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *); |
3067 | extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32); |
3068 | extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *); |
3069 | extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *); |
3070 | -extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *); |
3071 | +extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *, |
3072 | + struct zfcp_fsf_req *); |
3073 | |
3074 | /* zfcp_erp.c */ |
3075 | extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32); |
3076 | diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c |
3077 | index 522a633c866a..75f820ca17b7 100644 |
3078 | --- a/drivers/s390/scsi/zfcp_fsf.c |
3079 | +++ b/drivers/s390/scsi/zfcp_fsf.c |
3080 | @@ -3,7 +3,7 @@ |
3081 | * |
3082 | * Implementation of FSF commands. |
3083 | * |
3084 | - * Copyright IBM Corp. 2002, 2013 |
3085 | + * Copyright IBM Corp. 2002, 2015 |
3086 | */ |
3087 | |
3088 | #define KMSG_COMPONENT "zfcp" |
3089 | @@ -508,7 +508,10 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) |
3090 | fc_host_port_type(shost) = FC_PORTTYPE_PTP; |
3091 | break; |
3092 | case FSF_TOPO_FABRIC: |
3093 | - fc_host_port_type(shost) = FC_PORTTYPE_NPORT; |
3094 | + if (bottom->connection_features & FSF_FEATURE_NPIV_MODE) |
3095 | + fc_host_port_type(shost) = FC_PORTTYPE_NPIV; |
3096 | + else |
3097 | + fc_host_port_type(shost) = FC_PORTTYPE_NPORT; |
3098 | break; |
3099 | case FSF_TOPO_AL: |
3100 | fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; |
3101 | @@ -613,7 +616,6 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req) |
3102 | |
3103 | if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) { |
3104 | fc_host_permanent_port_name(shost) = bottom->wwpn; |
3105 | - fc_host_port_type(shost) = FC_PORTTYPE_NPIV; |
3106 | } else |
3107 | fc_host_permanent_port_name(shost) = fc_host_port_name(shost); |
3108 | fc_host_maxframe_size(shost) = bottom->maximum_frame_size; |
3109 | @@ -982,8 +984,12 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, |
3110 | if (zfcp_adapter_multi_buffer_active(adapter)) { |
3111 | if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req)) |
3112 | return -EIO; |
3113 | + qtcb->bottom.support.req_buf_length = |
3114 | + zfcp_qdio_real_bytes(sg_req); |
3115 | if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp)) |
3116 | return -EIO; |
3117 | + qtcb->bottom.support.resp_buf_length = |
3118 | + zfcp_qdio_real_bytes(sg_resp); |
3119 | |
3120 | zfcp_qdio_set_data_div(qdio, &req->qdio_req, |
3121 | zfcp_qdio_sbale_count(sg_req)); |
3122 | @@ -1073,6 +1079,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, |
3123 | |
3124 | req->handler = zfcp_fsf_send_ct_handler; |
3125 | req->qtcb->header.port_handle = wka_port->handle; |
3126 | + ct->d_id = wka_port->d_id; |
3127 | req->data = ct; |
3128 | |
3129 | zfcp_dbf_san_req("fssct_1", req, wka_port->d_id); |
3130 | @@ -1169,6 +1176,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, |
3131 | |
3132 | hton24(req->qtcb->bottom.support.d_id, d_id); |
3133 | req->handler = zfcp_fsf_send_els_handler; |
3134 | + els->d_id = d_id; |
3135 | req->data = els; |
3136 | |
3137 | zfcp_dbf_san_req("fssels1", req, d_id); |
3138 | @@ -1575,7 +1583,7 @@ out: |
3139 | int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) |
3140 | { |
3141 | struct zfcp_qdio *qdio = wka_port->adapter->qdio; |
3142 | - struct zfcp_fsf_req *req; |
3143 | + struct zfcp_fsf_req *req = NULL; |
3144 | int retval = -EIO; |
3145 | |
3146 | spin_lock_irq(&qdio->req_q_lock); |
3147 | @@ -1604,6 +1612,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) |
3148 | zfcp_fsf_req_free(req); |
3149 | out: |
3150 | spin_unlock_irq(&qdio->req_q_lock); |
3151 | + if (req && !IS_ERR(req)) |
3152 | + zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id); |
3153 | return retval; |
3154 | } |
3155 | |
3156 | @@ -1628,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) |
3157 | int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) |
3158 | { |
3159 | struct zfcp_qdio *qdio = wka_port->adapter->qdio; |
3160 | - struct zfcp_fsf_req *req; |
3161 | + struct zfcp_fsf_req *req = NULL; |
3162 | int retval = -EIO; |
3163 | |
3164 | spin_lock_irq(&qdio->req_q_lock); |
3165 | @@ -1657,6 +1667,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) |
3166 | zfcp_fsf_req_free(req); |
3167 | out: |
3168 | spin_unlock_irq(&qdio->req_q_lock); |
3169 | + if (req && !IS_ERR(req)) |
3170 | + zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id); |
3171 | return retval; |
3172 | } |
3173 | |
3174 | diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h |
3175 | index 57ae3ae1046d..be1c04b334c5 100644 |
3176 | --- a/drivers/s390/scsi/zfcp_fsf.h |
3177 | +++ b/drivers/s390/scsi/zfcp_fsf.h |
3178 | @@ -3,7 +3,7 @@ |
3179 | * |
3180 | * Interface to the FSF support functions. |
3181 | * |
3182 | - * Copyright IBM Corp. 2002, 2010 |
3183 | + * Copyright IBM Corp. 2002, 2015 |
3184 | */ |
3185 | |
3186 | #ifndef FSF_H |
3187 | @@ -436,6 +436,7 @@ struct zfcp_blk_drv_data { |
3188 | * @handler_data: data passed to handler function |
3189 | * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC) |
3190 | * @status: used to pass error status to calling function |
3191 | + * @d_id: Destination ID of either open WKA port for CT or of D_ID for ELS |
3192 | */ |
3193 | struct zfcp_fsf_ct_els { |
3194 | struct scatterlist *req; |
3195 | @@ -444,6 +445,7 @@ struct zfcp_fsf_ct_els { |
3196 | void *handler_data; |
3197 | struct zfcp_port *port; |
3198 | int status; |
3199 | + u32 d_id; |
3200 | }; |
3201 | |
3202 | #endif /* FSF_H */ |
3203 | diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c |
3204 | index b3c6ff49103b..9069f98a1817 100644 |
3205 | --- a/drivers/s390/scsi/zfcp_scsi.c |
3206 | +++ b/drivers/s390/scsi/zfcp_scsi.c |
3207 | @@ -3,7 +3,7 @@ |
3208 | * |
3209 | * Interface to Linux SCSI midlayer. |
3210 | * |
3211 | - * Copyright IBM Corp. 2002, 2013 |
3212 | + * Copyright IBM Corp. 2002, 2015 |
3213 | */ |
3214 | |
3215 | #define KMSG_COMPONENT "zfcp" |
3216 | @@ -556,6 +556,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port) |
3217 | ids.port_id = port->d_id; |
3218 | ids.roles = FC_RPORT_ROLE_FCP_TARGET; |
3219 | |
3220 | + zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL, |
3221 | + ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD, |
3222 | + ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD); |
3223 | rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids); |
3224 | if (!rport) { |
3225 | dev_err(&port->adapter->ccw_device->dev, |
3226 | @@ -577,6 +580,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port) |
3227 | struct fc_rport *rport = port->rport; |
3228 | |
3229 | if (rport) { |
3230 | + zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL, |
3231 | + ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL, |
3232 | + ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL); |
3233 | fc_remote_port_delete(rport); |
3234 | port->rport = NULL; |
3235 | } |
3236 | diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c |
3237 | index e0a78f53d809..bac8cdf9fb23 100644 |
3238 | --- a/drivers/scsi/scsi_scan.c |
3239 | +++ b/drivers/scsi/scsi_scan.c |
3240 | @@ -1472,12 +1472,12 @@ retry: |
3241 | out_err: |
3242 | kfree(lun_data); |
3243 | out: |
3244 | - scsi_device_put(sdev); |
3245 | if (scsi_device_created(sdev)) |
3246 | /* |
3247 | * the sdev we used didn't appear in the report luns scan |
3248 | */ |
3249 | __scsi_remove_device(sdev); |
3250 | + scsi_device_put(sdev); |
3251 | return ret; |
3252 | } |
3253 | |
3254 | diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c |
3255 | index 333eb2215a57..0aaf429f31d5 100644 |
3256 | --- a/drivers/soc/fsl/qe/gpio.c |
3257 | +++ b/drivers/soc/fsl/qe/gpio.c |
3258 | @@ -41,7 +41,8 @@ struct qe_gpio_chip { |
3259 | |
3260 | static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc) |
3261 | { |
3262 | - struct qe_gpio_chip *qe_gc = gpiochip_get_data(&mm_gc->gc); |
3263 | + struct qe_gpio_chip *qe_gc = |
3264 | + container_of(mm_gc, struct qe_gpio_chip, mm_gc); |
3265 | struct qe_pio_regs __iomem *regs = mm_gc->regs; |
3266 | |
3267 | qe_gc->cpdata = in_be32(®s->cpdata); |
3268 | diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c |
3269 | index 41eff805a904..104e68d9b84f 100644 |
3270 | --- a/drivers/soc/fsl/qe/qe_common.c |
3271 | +++ b/drivers/soc/fsl/qe/qe_common.c |
3272 | @@ -70,6 +70,11 @@ int cpm_muram_init(void) |
3273 | } |
3274 | |
3275 | muram_pool = gen_pool_create(0, -1); |
3276 | + if (!muram_pool) { |
3277 | + pr_err("Cannot allocate memory pool for CPM/QE muram"); |
3278 | + ret = -ENOMEM; |
3279 | + goto out_muram; |
3280 | + } |
3281 | muram_pbase = of_translate_address(np, zero); |
3282 | if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) { |
3283 | pr_err("Cannot translate zero through CPM muram node"); |
3284 | @@ -116,6 +121,9 @@ static unsigned long cpm_muram_alloc_common(unsigned long size, |
3285 | struct muram_block *entry; |
3286 | unsigned long start; |
3287 | |
3288 | + if (!muram_pool && cpm_muram_init()) |
3289 | + goto out2; |
3290 | + |
3291 | start = gen_pool_alloc_algo(muram_pool, size, algo, data); |
3292 | if (!start) |
3293 | goto out2; |
3294 | diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c |
3295 | index 6094a6beddde..e825d580ccee 100644 |
3296 | --- a/drivers/target/target_core_transport.c |
3297 | +++ b/drivers/target/target_core_transport.c |
3298 | @@ -754,15 +754,7 @@ EXPORT_SYMBOL(target_complete_cmd); |
3299 | |
3300 | void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) |
3301 | { |
3302 | - if (scsi_status != SAM_STAT_GOOD) { |
3303 | - return; |
3304 | - } |
3305 | - |
3306 | - /* |
3307 | - * Calculate new residual count based upon length of SCSI data |
3308 | - * transferred. |
3309 | - */ |
3310 | - if (length < cmd->data_length) { |
3311 | + if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) { |
3312 | if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { |
3313 | cmd->residual_count += cmd->data_length - length; |
3314 | } else { |
3315 | @@ -771,12 +763,6 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len |
3316 | } |
3317 | |
3318 | cmd->data_length = length; |
3319 | - } else if (length > cmd->data_length) { |
3320 | - cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; |
3321 | - cmd->residual_count = length - cmd->data_length; |
3322 | - } else { |
3323 | - cmd->se_cmd_flags &= ~(SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT); |
3324 | - cmd->residual_count = 0; |
3325 | } |
3326 | |
3327 | target_complete_cmd(cmd, scsi_status); |
3328 | @@ -1706,6 +1692,7 @@ void transport_generic_request_failure(struct se_cmd *cmd, |
3329 | case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: |
3330 | case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: |
3331 | case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: |
3332 | + case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: |
3333 | break; |
3334 | case TCM_OUT_OF_RESOURCES: |
3335 | sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
3336 | @@ -2547,8 +2534,10 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) |
3337 | * fabric acknowledgement that requires two target_put_sess_cmd() |
3338 | * invocations before se_cmd descriptor release. |
3339 | */ |
3340 | - if (ack_kref) |
3341 | + if (ack_kref) { |
3342 | kref_get(&se_cmd->cmd_kref); |
3343 | + se_cmd->se_cmd_flags |= SCF_ACK_KREF; |
3344 | + } |
3345 | |
3346 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); |
3347 | if (se_sess->sess_tearing_down) { |
3348 | @@ -2871,6 +2860,12 @@ static const struct sense_info sense_info_table[] = { |
3349 | .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ |
3350 | .add_sector_info = true, |
3351 | }, |
3352 | + [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = { |
3353 | + .key = COPY_ABORTED, |
3354 | + .asc = 0x0d, |
3355 | + .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */ |
3356 | + |
3357 | + }, |
3358 | [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = { |
3359 | /* |
3360 | * Returning ILLEGAL REQUEST would cause immediate IO errors on |
3361 | diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c |
3362 | index 75cd85426ae3..094a1440eacb 100644 |
3363 | --- a/drivers/target/target_core_xcopy.c |
3364 | +++ b/drivers/target/target_core_xcopy.c |
3365 | @@ -104,7 +104,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op |
3366 | } |
3367 | mutex_unlock(&g_device_mutex); |
3368 | |
3369 | - pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); |
3370 | + pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); |
3371 | return -EINVAL; |
3372 | } |
3373 | |
3374 | @@ -185,7 +185,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op |
3375 | |
3376 | static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, |
3377 | struct xcopy_op *xop, unsigned char *p, |
3378 | - unsigned short tdll) |
3379 | + unsigned short tdll, sense_reason_t *sense_ret) |
3380 | { |
3381 | struct se_device *local_dev = se_cmd->se_dev; |
3382 | unsigned char *desc = p; |
3383 | @@ -193,6 +193,8 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, |
3384 | unsigned short start = 0; |
3385 | bool src = true; |
3386 | |
3387 | + *sense_ret = TCM_INVALID_PARAMETER_LIST; |
3388 | + |
3389 | if (offset != 0) { |
3390 | pr_err("XCOPY target descriptor list length is not" |
3391 | " multiple of %d\n", XCOPY_TARGET_DESC_LEN); |
3392 | @@ -243,9 +245,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd, |
3393 | rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true); |
3394 | else |
3395 | rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false); |
3396 | - |
3397 | - if (rc < 0) |
3398 | + /* |
3399 | + * If a matching IEEE NAA 0x83 descriptor for the requested device |
3400 | + * is not located on this node, return COPY_ABORTED with ASQ/ASQC |
3401 | + * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the |
3402 | + * initiator to fall back to normal copy method. |
3403 | + */ |
3404 | + if (rc < 0) { |
3405 | + *sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE; |
3406 | goto out; |
3407 | + } |
3408 | |
3409 | pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n", |
3410 | xop->src_dev, &xop->src_tid_wwn[0]); |
3411 | @@ -653,6 +662,7 @@ static int target_xcopy_read_source( |
3412 | rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0], |
3413 | remote_port, true); |
3414 | if (rc < 0) { |
3415 | + ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status; |
3416 | transport_generic_free_cmd(se_cmd, 0); |
3417 | return rc; |
3418 | } |
3419 | @@ -664,6 +674,7 @@ static int target_xcopy_read_source( |
3420 | |
3421 | rc = target_xcopy_issue_pt_cmd(xpt_cmd); |
3422 | if (rc < 0) { |
3423 | + ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status; |
3424 | transport_generic_free_cmd(se_cmd, 0); |
3425 | return rc; |
3426 | } |
3427 | @@ -714,6 +725,7 @@ static int target_xcopy_write_destination( |
3428 | remote_port, false); |
3429 | if (rc < 0) { |
3430 | struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd; |
3431 | + ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status; |
3432 | /* |
3433 | * If the failure happened before the t_mem_list hand-off in |
3434 | * target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that |
3435 | @@ -729,6 +741,7 @@ static int target_xcopy_write_destination( |
3436 | |
3437 | rc = target_xcopy_issue_pt_cmd(xpt_cmd); |
3438 | if (rc < 0) { |
3439 | + ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status; |
3440 | se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; |
3441 | transport_generic_free_cmd(se_cmd, 0); |
3442 | return rc; |
3443 | @@ -815,9 +828,14 @@ static void target_xcopy_do_work(struct work_struct *work) |
3444 | out: |
3445 | xcopy_pt_undepend_remotedev(xop); |
3446 | kfree(xop); |
3447 | - |
3448 | - pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n"); |
3449 | - ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION; |
3450 | + /* |
3451 | + * Don't override an error scsi status if it has already been set |
3452 | + */ |
3453 | + if (ec_cmd->scsi_status == SAM_STAT_GOOD) { |
3454 | + pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY" |
3455 | + " CHECK_CONDITION -> sending response\n", rc); |
3456 | + ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION; |
3457 | + } |
3458 | target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION); |
3459 | } |
3460 | |
3461 | @@ -875,7 +893,7 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) |
3462 | " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, |
3463 | tdll, sdll, inline_dl); |
3464 | |
3465 | - rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll); |
3466 | + rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret); |
3467 | if (rc <= 0) |
3468 | goto out; |
3469 | |
3470 | diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c |
3471 | index 216e18cc9133..9a874a89941d 100644 |
3472 | --- a/drivers/target/tcm_fc/tfc_cmd.c |
3473 | +++ b/drivers/target/tcm_fc/tfc_cmd.c |
3474 | @@ -572,7 +572,7 @@ static void ft_send_work(struct work_struct *work) |
3475 | if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb, |
3476 | &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun), |
3477 | ntohl(fcp->fc_dl), task_attr, data_dir, |
3478 | - TARGET_SCF_ACK_KREF)) |
3479 | + TARGET_SCF_ACK_KREF | TARGET_SCF_USE_CPUID)) |
3480 | goto err; |
3481 | |
3482 | pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl); |
3483 | diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c |
3484 | index 924bad45c176..37a37c4d04cb 100644 |
3485 | --- a/drivers/video/fbdev/efifb.c |
3486 | +++ b/drivers/video/fbdev/efifb.c |
3487 | @@ -50,9 +50,9 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green, |
3488 | return 1; |
3489 | |
3490 | if (regno < 16) { |
3491 | - red >>= 8; |
3492 | - green >>= 8; |
3493 | - blue >>= 8; |
3494 | + red >>= 16 - info->var.red.length; |
3495 | + green >>= 16 - info->var.green.length; |
3496 | + blue >>= 16 - info->var.blue.length; |
3497 | ((u32 *)(info->pseudo_palette))[regno] = |
3498 | (red << info->var.red.offset) | |
3499 | (green << info->var.green.offset) | |
3500 | diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c |
3501 | index 4a2290f900a8..d5735c12067d 100644 |
3502 | --- a/drivers/watchdog/mt7621_wdt.c |
3503 | +++ b/drivers/watchdog/mt7621_wdt.c |
3504 | @@ -139,7 +139,6 @@ static int mt7621_wdt_probe(struct platform_device *pdev) |
3505 | if (!IS_ERR(mt7621_wdt_reset)) |
3506 | reset_control_deassert(mt7621_wdt_reset); |
3507 | |
3508 | - mt7621_wdt_dev.dev = &pdev->dev; |
3509 | mt7621_wdt_dev.bootstatus = mt7621_wdt_bootcause(); |
3510 | |
3511 | watchdog_init_timeout(&mt7621_wdt_dev, mt7621_wdt_dev.max_timeout, |
3512 | diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c |
3513 | index 1967919ae743..14b4fd428fff 100644 |
3514 | --- a/drivers/watchdog/rt2880_wdt.c |
3515 | +++ b/drivers/watchdog/rt2880_wdt.c |
3516 | @@ -158,7 +158,6 @@ static int rt288x_wdt_probe(struct platform_device *pdev) |
3517 | |
3518 | rt288x_wdt_freq = clk_get_rate(rt288x_wdt_clk) / RALINK_WDT_PRESCALE; |
3519 | |
3520 | - rt288x_wdt_dev.dev = &pdev->dev; |
3521 | rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause(); |
3522 | rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq); |
3523 | rt288x_wdt_dev.parent = &pdev->dev; |
3524 | diff --git a/fs/ceph/file.c b/fs/ceph/file.c |
3525 | index 0f5375d8e030..eede975e85c0 100644 |
3526 | --- a/fs/ceph/file.c |
3527 | +++ b/fs/ceph/file.c |
3528 | @@ -1272,7 +1272,8 @@ again: |
3529 | statret = __ceph_do_getattr(inode, page, |
3530 | CEPH_STAT_CAP_INLINE_DATA, !!page); |
3531 | if (statret < 0) { |
3532 | - __free_page(page); |
3533 | + if (page) |
3534 | + __free_page(page); |
3535 | if (statret == -ENODATA) { |
3536 | BUG_ON(retry_op != READ_INLINE); |
3537 | goto again; |
3538 | diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c |
3539 | index 6c58e13fed2f..3d03e48a9213 100644 |
3540 | --- a/fs/cifs/cifs_debug.c |
3541 | +++ b/fs/cifs/cifs_debug.c |
3542 | @@ -152,6 +152,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) |
3543 | list_for_each(tmp1, &cifs_tcp_ses_list) { |
3544 | server = list_entry(tmp1, struct TCP_Server_Info, |
3545 | tcp_ses_list); |
3546 | + seq_printf(m, "\nNumber of credits: %d", server->credits); |
3547 | i++; |
3548 | list_for_each(tmp2, &server->smb_ses_list) { |
3549 | ses = list_entry(tmp2, struct cifs_ses, |
3550 | diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c |
3551 | index 14ae4b8e1a3c..8c68d03a6949 100644 |
3552 | --- a/fs/cifs/cifsfs.c |
3553 | +++ b/fs/cifs/cifsfs.c |
3554 | @@ -271,7 +271,7 @@ cifs_alloc_inode(struct super_block *sb) |
3555 | cifs_inode->createtime = 0; |
3556 | cifs_inode->epoch = 0; |
3557 | #ifdef CONFIG_CIFS_SMB2 |
3558 | - get_random_bytes(cifs_inode->lease_key, SMB2_LEASE_KEY_SIZE); |
3559 | + generate_random_uuid(cifs_inode->lease_key); |
3560 | #endif |
3561 | /* |
3562 | * Can not set i_flags here - they get immediately overwritten to zero |
3563 | @@ -1271,7 +1271,6 @@ init_cifs(void) |
3564 | GlobalTotalActiveXid = 0; |
3565 | GlobalMaxActiveXid = 0; |
3566 | spin_lock_init(&cifs_tcp_ses_lock); |
3567 | - spin_lock_init(&cifs_file_list_lock); |
3568 | spin_lock_init(&GlobalMid_Lock); |
3569 | |
3570 | get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret)); |
3571 | diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h |
3572 | index 8f1d8c1e72be..65f78b7a9062 100644 |
3573 | --- a/fs/cifs/cifsglob.h |
3574 | +++ b/fs/cifs/cifsglob.h |
3575 | @@ -833,6 +833,7 @@ struct cifs_tcon { |
3576 | struct list_head tcon_list; |
3577 | int tc_count; |
3578 | struct list_head openFileList; |
3579 | + spinlock_t open_file_lock; /* protects list above */ |
3580 | struct cifs_ses *ses; /* pointer to session associated with */ |
3581 | char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */ |
3582 | char *nativeFileSystem; |
3583 | @@ -889,7 +890,7 @@ struct cifs_tcon { |
3584 | #endif /* CONFIG_CIFS_STATS2 */ |
3585 | __u64 bytes_read; |
3586 | __u64 bytes_written; |
3587 | - spinlock_t stat_lock; |
3588 | + spinlock_t stat_lock; /* protects the two fields above */ |
3589 | #endif /* CONFIG_CIFS_STATS */ |
3590 | FILE_SYSTEM_DEVICE_INFO fsDevInfo; |
3591 | FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */ |
3592 | @@ -1040,8 +1041,10 @@ struct cifs_fid_locks { |
3593 | }; |
3594 | |
3595 | struct cifsFileInfo { |
3596 | + /* following two lists are protected by tcon->open_file_lock */ |
3597 | struct list_head tlist; /* pointer to next fid owned by tcon */ |
3598 | struct list_head flist; /* next fid (file instance) for this inode */ |
3599 | + /* lock list below protected by cifsi->lock_sem */ |
3600 | struct cifs_fid_locks *llist; /* brlocks held by this fid */ |
3601 | kuid_t uid; /* allows finding which FileInfo structure */ |
3602 | __u32 pid; /* process id who opened file */ |
3603 | @@ -1049,11 +1052,12 @@ struct cifsFileInfo { |
3604 | /* BB add lock scope info here if needed */ ; |
3605 | /* lock scope id (0 if none) */ |
3606 | struct dentry *dentry; |
3607 | - unsigned int f_flags; |
3608 | struct tcon_link *tlink; |
3609 | + unsigned int f_flags; |
3610 | bool invalidHandle:1; /* file closed via session abend */ |
3611 | bool oplock_break_cancelled:1; |
3612 | - int count; /* refcount protected by cifs_file_list_lock */ |
3613 | + int count; |
3614 | + spinlock_t file_info_lock; /* protects four flag/count fields above */ |
3615 | struct mutex fh_mutex; /* prevents reopen race after dead ses*/ |
3616 | struct cifs_search_info srch_inf; |
3617 | struct work_struct oplock_break; /* work for oplock breaks */ |
3618 | @@ -1120,7 +1124,7 @@ struct cifs_writedata { |
3619 | |
3620 | /* |
3621 | * Take a reference on the file private data. Must be called with |
3622 | - * cifs_file_list_lock held. |
3623 | + * cfile->file_info_lock held. |
3624 | */ |
3625 | static inline void |
3626 | cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file) |
3627 | @@ -1514,8 +1518,10 @@ require use of the stronger protocol */ |
3628 | * GlobalMid_Lock protects: |
3629 | * list operations on pending_mid_q and oplockQ |
3630 | * updates to XID counters, multiplex id and SMB sequence numbers |
3631 | - * cifs_file_list_lock protects: |
3632 | - * list operations on tcp and SMB session lists and tCon lists |
3633 | + * tcp_ses_lock protects: |
3634 | + * list operations on tcp and SMB session lists |
3635 | + * tcon->open_file_lock protects the list of open files hanging off the tcon |
3636 | + * cfile->file_info_lock protects counters and fields in cifs file struct |
3637 | * f_owner.lock protects certain per file struct operations |
3638 | * mapping->page_lock protects certain per page operations |
3639 | * |
3640 | @@ -1547,18 +1553,12 @@ GLOBAL_EXTERN struct list_head cifs_tcp_ses_list; |
3641 | * tcp session, and the list of tcon's per smb session. It also protects |
3642 | * the reference counters for the server, smb session, and tcon. Finally, |
3643 | * changes to the tcon->tidStatus should be done while holding this lock. |
3644 | + * generally the locks should be taken in order tcp_ses_lock before |
3645 | + * tcon->open_file_lock and that before file->file_info_lock since the |
3646 | + * structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file |
3647 | */ |
3648 | GLOBAL_EXTERN spinlock_t cifs_tcp_ses_lock; |
3649 | |
3650 | -/* |
3651 | - * This lock protects the cifs_file->llist and cifs_file->flist |
3652 | - * list operations, and updates to some flags (cifs_file->invalidHandle) |
3653 | - * It will be moved to either use the tcon->stat_lock or equivalent later. |
3654 | - * If cifs_tcp_ses_lock and the lock below are both needed to be held, then |
3655 | - * the cifs_tcp_ses_lock must be grabbed first and released last. |
3656 | - */ |
3657 | -GLOBAL_EXTERN spinlock_t cifs_file_list_lock; |
3658 | - |
3659 | #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */ |
3660 | /* Outstanding dir notify requests */ |
3661 | GLOBAL_EXTERN struct list_head GlobalDnotifyReqList; |
3662 | diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c |
3663 | index d47197ea4ab6..78046051bbbc 100644 |
3664 | --- a/fs/cifs/cifssmb.c |
3665 | +++ b/fs/cifs/cifssmb.c |
3666 | @@ -98,13 +98,13 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon) |
3667 | struct list_head *tmp1; |
3668 | |
3669 | /* list all files open on tree connection and mark them invalid */ |
3670 | - spin_lock(&cifs_file_list_lock); |
3671 | + spin_lock(&tcon->open_file_lock); |
3672 | list_for_each_safe(tmp, tmp1, &tcon->openFileList) { |
3673 | open_file = list_entry(tmp, struct cifsFileInfo, tlist); |
3674 | open_file->invalidHandle = true; |
3675 | open_file->oplock_break_cancelled = true; |
3676 | } |
3677 | - spin_unlock(&cifs_file_list_lock); |
3678 | + spin_unlock(&tcon->open_file_lock); |
3679 | /* |
3680 | * BB Add call to invalidate_inodes(sb) for all superblocks mounted |
3681 | * to this tcon. |
3682 | diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c |
3683 | index 2e4f4bad8b1e..7b67179521cf 100644 |
3684 | --- a/fs/cifs/connect.c |
3685 | +++ b/fs/cifs/connect.c |
3686 | @@ -2163,7 +2163,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) |
3687 | memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr, |
3688 | sizeof(tcp_ses->dstaddr)); |
3689 | #ifdef CONFIG_CIFS_SMB2 |
3690 | - get_random_bytes(tcp_ses->client_guid, SMB2_CLIENT_GUID_SIZE); |
3691 | + generate_random_uuid(tcp_ses->client_guid); |
3692 | #endif |
3693 | /* |
3694 | * at this point we are the only ones with the pointer |
3695 | @@ -3688,14 +3688,16 @@ remote_path_check: |
3696 | goto mount_fail_check; |
3697 | } |
3698 | |
3699 | - rc = cifs_are_all_path_components_accessible(server, |
3700 | + if (rc != -EREMOTE) { |
3701 | + rc = cifs_are_all_path_components_accessible(server, |
3702 | xid, tcon, cifs_sb, |
3703 | full_path); |
3704 | - if (rc != 0) { |
3705 | - cifs_dbg(VFS, "cannot query dirs between root and final path, " |
3706 | - "enabling CIFS_MOUNT_USE_PREFIX_PATH\n"); |
3707 | - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; |
3708 | - rc = 0; |
3709 | + if (rc != 0) { |
3710 | + cifs_dbg(VFS, "cannot query dirs between root and final path, " |
3711 | + "enabling CIFS_MOUNT_USE_PREFIX_PATH\n"); |
3712 | + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; |
3713 | + rc = 0; |
3714 | + } |
3715 | } |
3716 | kfree(full_path); |
3717 | } |
3718 | diff --git a/fs/cifs/file.c b/fs/cifs/file.c |
3719 | index 579e41b350a2..605438afe7ef 100644 |
3720 | --- a/fs/cifs/file.c |
3721 | +++ b/fs/cifs/file.c |
3722 | @@ -305,6 +305,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, |
3723 | cfile->tlink = cifs_get_tlink(tlink); |
3724 | INIT_WORK(&cfile->oplock_break, cifs_oplock_break); |
3725 | mutex_init(&cfile->fh_mutex); |
3726 | + spin_lock_init(&cfile->file_info_lock); |
3727 | |
3728 | cifs_sb_active(inode->i_sb); |
3729 | |
3730 | @@ -317,7 +318,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, |
3731 | oplock = 0; |
3732 | } |
3733 | |
3734 | - spin_lock(&cifs_file_list_lock); |
3735 | + spin_lock(&tcon->open_file_lock); |
3736 | if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock) |
3737 | oplock = fid->pending_open->oplock; |
3738 | list_del(&fid->pending_open->olist); |
3739 | @@ -326,12 +327,13 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, |
3740 | server->ops->set_fid(cfile, fid, oplock); |
3741 | |
3742 | list_add(&cfile->tlist, &tcon->openFileList); |
3743 | + |
3744 | /* if readable file instance put first in list*/ |
3745 | if (file->f_mode & FMODE_READ) |
3746 | list_add(&cfile->flist, &cinode->openFileList); |
3747 | else |
3748 | list_add_tail(&cfile->flist, &cinode->openFileList); |
3749 | - spin_unlock(&cifs_file_list_lock); |
3750 | + spin_unlock(&tcon->open_file_lock); |
3751 | |
3752 | if (fid->purge_cache) |
3753 | cifs_zap_mapping(inode); |
3754 | @@ -343,16 +345,16 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, |
3755 | struct cifsFileInfo * |
3756 | cifsFileInfo_get(struct cifsFileInfo *cifs_file) |
3757 | { |
3758 | - spin_lock(&cifs_file_list_lock); |
3759 | + spin_lock(&cifs_file->file_info_lock); |
3760 | cifsFileInfo_get_locked(cifs_file); |
3761 | - spin_unlock(&cifs_file_list_lock); |
3762 | + spin_unlock(&cifs_file->file_info_lock); |
3763 | return cifs_file; |
3764 | } |
3765 | |
3766 | /* |
3767 | * Release a reference on the file private data. This may involve closing |
3768 | * the filehandle out on the server. Must be called without holding |
3769 | - * cifs_file_list_lock. |
3770 | + * tcon->open_file_lock and cifs_file->file_info_lock. |
3771 | */ |
3772 | void cifsFileInfo_put(struct cifsFileInfo *cifs_file) |
3773 | { |
3774 | @@ -367,11 +369,15 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) |
3775 | struct cifs_pending_open open; |
3776 | bool oplock_break_cancelled; |
3777 | |
3778 | - spin_lock(&cifs_file_list_lock); |
3779 | + spin_lock(&tcon->open_file_lock); |
3780 | + |
3781 | + spin_lock(&cifs_file->file_info_lock); |
3782 | if (--cifs_file->count > 0) { |
3783 | - spin_unlock(&cifs_file_list_lock); |
3784 | + spin_unlock(&cifs_file->file_info_lock); |
3785 | + spin_unlock(&tcon->open_file_lock); |
3786 | return; |
3787 | } |
3788 | + spin_unlock(&cifs_file->file_info_lock); |
3789 | |
3790 | if (server->ops->get_lease_key) |
3791 | server->ops->get_lease_key(inode, &fid); |
3792 | @@ -395,7 +401,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) |
3793 | set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags); |
3794 | cifs_set_oplock_level(cifsi, 0); |
3795 | } |
3796 | - spin_unlock(&cifs_file_list_lock); |
3797 | + |
3798 | + spin_unlock(&tcon->open_file_lock); |
3799 | |
3800 | oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break); |
3801 | |
3802 | @@ -772,10 +779,10 @@ int cifs_closedir(struct inode *inode, struct file *file) |
3803 | server = tcon->ses->server; |
3804 | |
3805 | cifs_dbg(FYI, "Freeing private data in close dir\n"); |
3806 | - spin_lock(&cifs_file_list_lock); |
3807 | + spin_lock(&cfile->file_info_lock); |
3808 | if (server->ops->dir_needs_close(cfile)) { |
3809 | cfile->invalidHandle = true; |
3810 | - spin_unlock(&cifs_file_list_lock); |
3811 | + spin_unlock(&cfile->file_info_lock); |
3812 | if (server->ops->close_dir) |
3813 | rc = server->ops->close_dir(xid, tcon, &cfile->fid); |
3814 | else |
3815 | @@ -784,7 +791,7 @@ int cifs_closedir(struct inode *inode, struct file *file) |
3816 | /* not much we can do if it fails anyway, ignore rc */ |
3817 | rc = 0; |
3818 | } else |
3819 | - spin_unlock(&cifs_file_list_lock); |
3820 | + spin_unlock(&cfile->file_info_lock); |
3821 | |
3822 | buf = cfile->srch_inf.ntwrk_buf_start; |
3823 | if (buf) { |
3824 | @@ -1728,12 +1735,13 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, |
3825 | { |
3826 | struct cifsFileInfo *open_file = NULL; |
3827 | struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb); |
3828 | + struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); |
3829 | |
3830 | /* only filter by fsuid on multiuser mounts */ |
3831 | if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) |
3832 | fsuid_only = false; |
3833 | |
3834 | - spin_lock(&cifs_file_list_lock); |
3835 | + spin_lock(&tcon->open_file_lock); |
3836 | /* we could simply get the first_list_entry since write-only entries |
3837 | are always at the end of the list but since the first entry might |
3838 | have a close pending, we go through the whole list */ |
3839 | @@ -1744,8 +1752,8 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, |
3840 | if (!open_file->invalidHandle) { |
3841 | /* found a good file */ |
3842 | /* lock it so it will not be closed on us */ |
3843 | - cifsFileInfo_get_locked(open_file); |
3844 | - spin_unlock(&cifs_file_list_lock); |
3845 | + cifsFileInfo_get(open_file); |
3846 | + spin_unlock(&tcon->open_file_lock); |
3847 | return open_file; |
3848 | } /* else might as well continue, and look for |
3849 | another, or simply have the caller reopen it |
3850 | @@ -1753,7 +1761,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, |
3851 | } else /* write only file */ |
3852 | break; /* write only files are last so must be done */ |
3853 | } |
3854 | - spin_unlock(&cifs_file_list_lock); |
3855 | + spin_unlock(&tcon->open_file_lock); |
3856 | return NULL; |
3857 | } |
3858 | |
3859 | @@ -1762,6 +1770,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode, |
3860 | { |
3861 | struct cifsFileInfo *open_file, *inv_file = NULL; |
3862 | struct cifs_sb_info *cifs_sb; |
3863 | + struct cifs_tcon *tcon; |
3864 | bool any_available = false; |
3865 | int rc; |
3866 | unsigned int refind = 0; |
3867 | @@ -1777,15 +1786,16 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode, |
3868 | } |
3869 | |
3870 | cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb); |
3871 | + tcon = cifs_sb_master_tcon(cifs_sb); |
3872 | |
3873 | /* only filter by fsuid on multiuser mounts */ |
3874 | if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) |
3875 | fsuid_only = false; |
3876 | |
3877 | - spin_lock(&cifs_file_list_lock); |
3878 | + spin_lock(&tcon->open_file_lock); |
3879 | refind_writable: |
3880 | if (refind > MAX_REOPEN_ATT) { |
3881 | - spin_unlock(&cifs_file_list_lock); |
3882 | + spin_unlock(&tcon->open_file_lock); |
3883 | return NULL; |
3884 | } |
3885 | list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { |
3886 | @@ -1796,8 +1806,8 @@ refind_writable: |
3887 | if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) { |
3888 | if (!open_file->invalidHandle) { |
3889 | /* found a good writable file */ |
3890 | - cifsFileInfo_get_locked(open_file); |
3891 | - spin_unlock(&cifs_file_list_lock); |
3892 | + cifsFileInfo_get(open_file); |
3893 | + spin_unlock(&tcon->open_file_lock); |
3894 | return open_file; |
3895 | } else { |
3896 | if (!inv_file) |
3897 | @@ -1813,24 +1823,24 @@ refind_writable: |
3898 | |
3899 | if (inv_file) { |
3900 | any_available = false; |
3901 | - cifsFileInfo_get_locked(inv_file); |
3902 | + cifsFileInfo_get(inv_file); |
3903 | } |
3904 | |
3905 | - spin_unlock(&cifs_file_list_lock); |
3906 | + spin_unlock(&tcon->open_file_lock); |
3907 | |
3908 | if (inv_file) { |
3909 | rc = cifs_reopen_file(inv_file, false); |
3910 | if (!rc) |
3911 | return inv_file; |
3912 | else { |
3913 | - spin_lock(&cifs_file_list_lock); |
3914 | + spin_lock(&tcon->open_file_lock); |
3915 | list_move_tail(&inv_file->flist, |
3916 | &cifs_inode->openFileList); |
3917 | - spin_unlock(&cifs_file_list_lock); |
3918 | + spin_unlock(&tcon->open_file_lock); |
3919 | cifsFileInfo_put(inv_file); |
3920 | - spin_lock(&cifs_file_list_lock); |
3921 | ++refind; |
3922 | inv_file = NULL; |
3923 | + spin_lock(&tcon->open_file_lock); |
3924 | goto refind_writable; |
3925 | } |
3926 | } |
3927 | @@ -3618,15 +3628,17 @@ static int cifs_readpage(struct file *file, struct page *page) |
3928 | static int is_inode_writable(struct cifsInodeInfo *cifs_inode) |
3929 | { |
3930 | struct cifsFileInfo *open_file; |
3931 | + struct cifs_tcon *tcon = |
3932 | + cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb)); |
3933 | |
3934 | - spin_lock(&cifs_file_list_lock); |
3935 | + spin_lock(&tcon->open_file_lock); |
3936 | list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { |
3937 | if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) { |
3938 | - spin_unlock(&cifs_file_list_lock); |
3939 | + spin_unlock(&tcon->open_file_lock); |
3940 | return 1; |
3941 | } |
3942 | } |
3943 | - spin_unlock(&cifs_file_list_lock); |
3944 | + spin_unlock(&tcon->open_file_lock); |
3945 | return 0; |
3946 | } |
3947 | |
3948 | diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c |
3949 | index 813fe13c2ae1..c6729156f9a0 100644 |
3950 | --- a/fs/cifs/misc.c |
3951 | +++ b/fs/cifs/misc.c |
3952 | @@ -120,6 +120,7 @@ tconInfoAlloc(void) |
3953 | ++ret_buf->tc_count; |
3954 | INIT_LIST_HEAD(&ret_buf->openFileList); |
3955 | INIT_LIST_HEAD(&ret_buf->tcon_list); |
3956 | + spin_lock_init(&ret_buf->open_file_lock); |
3957 | #ifdef CONFIG_CIFS_STATS |
3958 | spin_lock_init(&ret_buf->stat_lock); |
3959 | #endif |
3960 | @@ -465,7 +466,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) |
3961 | continue; |
3962 | |
3963 | cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks); |
3964 | - spin_lock(&cifs_file_list_lock); |
3965 | + spin_lock(&tcon->open_file_lock); |
3966 | list_for_each(tmp2, &tcon->openFileList) { |
3967 | netfile = list_entry(tmp2, struct cifsFileInfo, |
3968 | tlist); |
3969 | @@ -495,11 +496,11 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) |
3970 | &netfile->oplock_break); |
3971 | netfile->oplock_break_cancelled = false; |
3972 | |
3973 | - spin_unlock(&cifs_file_list_lock); |
3974 | + spin_unlock(&tcon->open_file_lock); |
3975 | spin_unlock(&cifs_tcp_ses_lock); |
3976 | return true; |
3977 | } |
3978 | - spin_unlock(&cifs_file_list_lock); |
3979 | + spin_unlock(&tcon->open_file_lock); |
3980 | spin_unlock(&cifs_tcp_ses_lock); |
3981 | cifs_dbg(FYI, "No matching file for oplock break\n"); |
3982 | return true; |
3983 | @@ -613,9 +614,9 @@ backup_cred(struct cifs_sb_info *cifs_sb) |
3984 | void |
3985 | cifs_del_pending_open(struct cifs_pending_open *open) |
3986 | { |
3987 | - spin_lock(&cifs_file_list_lock); |
3988 | + spin_lock(&tlink_tcon(open->tlink)->open_file_lock); |
3989 | list_del(&open->olist); |
3990 | - spin_unlock(&cifs_file_list_lock); |
3991 | + spin_unlock(&tlink_tcon(open->tlink)->open_file_lock); |
3992 | } |
3993 | |
3994 | void |
3995 | @@ -635,7 +636,7 @@ void |
3996 | cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink, |
3997 | struct cifs_pending_open *open) |
3998 | { |
3999 | - spin_lock(&cifs_file_list_lock); |
4000 | + spin_lock(&tlink_tcon(tlink)->open_file_lock); |
4001 | cifs_add_pending_open_locked(fid, tlink, open); |
4002 | - spin_unlock(&cifs_file_list_lock); |
4003 | + spin_unlock(&tlink_tcon(open->tlink)->open_file_lock); |
4004 | } |
4005 | diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c |
4006 | index 65cf85dcda09..8f6a2a5863b9 100644 |
4007 | --- a/fs/cifs/readdir.c |
4008 | +++ b/fs/cifs/readdir.c |
4009 | @@ -597,14 +597,14 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos, |
4010 | is_dir_changed(file)) || (index_to_find < first_entry_in_buffer)) { |
4011 | /* close and restart search */ |
4012 | cifs_dbg(FYI, "search backing up - close and restart search\n"); |
4013 | - spin_lock(&cifs_file_list_lock); |
4014 | + spin_lock(&cfile->file_info_lock); |
4015 | if (server->ops->dir_needs_close(cfile)) { |
4016 | cfile->invalidHandle = true; |
4017 | - spin_unlock(&cifs_file_list_lock); |
4018 | + spin_unlock(&cfile->file_info_lock); |
4019 | if (server->ops->close_dir) |
4020 | server->ops->close_dir(xid, tcon, &cfile->fid); |
4021 | } else |
4022 | - spin_unlock(&cifs_file_list_lock); |
4023 | + spin_unlock(&cfile->file_info_lock); |
4024 | if (cfile->srch_inf.ntwrk_buf_start) { |
4025 | cifs_dbg(FYI, "freeing SMB ff cache buf on search rewind\n"); |
4026 | if (cfile->srch_inf.smallBuf) |
4027 | diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h |
4028 | index 0ffa18094335..238759c146ba 100644 |
4029 | --- a/fs/cifs/smb2glob.h |
4030 | +++ b/fs/cifs/smb2glob.h |
4031 | @@ -61,4 +61,14 @@ |
4032 | /* Maximum buffer size value we can send with 1 credit */ |
4033 | #define SMB2_MAX_BUFFER_SIZE 65536 |
4034 | |
4035 | +/* |
4036 | + * Maximum number of credits to keep available. |
4037 | + * This value is chosen somewhat arbitrarily. The Windows client |
4038 | + * defaults to 128 credits, the Windows server allows clients up to |
4039 | + * 512 credits, and the NetApp server does not limit clients at all. |
4040 | + * Choose a high enough value such that the client shouldn't limit |
4041 | + * performance. |
4042 | + */ |
4043 | +#define SMB2_MAX_CREDITS_AVAILABLE 32000 |
4044 | + |
4045 | #endif /* _SMB2_GLOB_H */ |
4046 | diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c |
4047 | index 4f0231e685a9..1238cd3552f9 100644 |
4048 | --- a/fs/cifs/smb2inode.c |
4049 | +++ b/fs/cifs/smb2inode.c |
4050 | @@ -266,9 +266,15 @@ smb2_set_file_info(struct inode *inode, const char *full_path, |
4051 | struct tcon_link *tlink; |
4052 | int rc; |
4053 | |
4054 | + if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) && |
4055 | + (buf->LastWriteTime == 0) && (buf->ChangeTime) && |
4056 | + (buf->Attributes == 0)) |
4057 | + return 0; /* would be a no op, no sense sending this */ |
4058 | + |
4059 | tlink = cifs_sb_tlink(cifs_sb); |
4060 | if (IS_ERR(tlink)) |
4061 | return PTR_ERR(tlink); |
4062 | + |
4063 | rc = smb2_open_op_close(xid, tlink_tcon(tlink), cifs_sb, full_path, |
4064 | FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, buf, |
4065 | SMB2_OP_SET_INFO); |
4066 | diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c |
4067 | index 389fb9f8c84e..3d383489b9cf 100644 |
4068 | --- a/fs/cifs/smb2misc.c |
4069 | +++ b/fs/cifs/smb2misc.c |
4070 | @@ -549,19 +549,19 @@ smb2_is_valid_lease_break(char *buffer) |
4071 | list_for_each(tmp1, &server->smb_ses_list) { |
4072 | ses = list_entry(tmp1, struct cifs_ses, smb_ses_list); |
4073 | |
4074 | - spin_lock(&cifs_file_list_lock); |
4075 | list_for_each(tmp2, &ses->tcon_list) { |
4076 | tcon = list_entry(tmp2, struct cifs_tcon, |
4077 | tcon_list); |
4078 | + spin_lock(&tcon->open_file_lock); |
4079 | cifs_stats_inc( |
4080 | &tcon->stats.cifs_stats.num_oplock_brks); |
4081 | if (smb2_tcon_has_lease(tcon, rsp, lw)) { |
4082 | - spin_unlock(&cifs_file_list_lock); |
4083 | + spin_unlock(&tcon->open_file_lock); |
4084 | spin_unlock(&cifs_tcp_ses_lock); |
4085 | return true; |
4086 | } |
4087 | + spin_unlock(&tcon->open_file_lock); |
4088 | } |
4089 | - spin_unlock(&cifs_file_list_lock); |
4090 | } |
4091 | } |
4092 | spin_unlock(&cifs_tcp_ses_lock); |
4093 | @@ -603,7 +603,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) |
4094 | tcon = list_entry(tmp1, struct cifs_tcon, tcon_list); |
4095 | |
4096 | cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks); |
4097 | - spin_lock(&cifs_file_list_lock); |
4098 | + spin_lock(&tcon->open_file_lock); |
4099 | list_for_each(tmp2, &tcon->openFileList) { |
4100 | cfile = list_entry(tmp2, struct cifsFileInfo, |
4101 | tlist); |
4102 | @@ -615,7 +615,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) |
4103 | |
4104 | cifs_dbg(FYI, "file id match, oplock break\n"); |
4105 | cinode = CIFS_I(d_inode(cfile->dentry)); |
4106 | - |
4107 | + spin_lock(&cfile->file_info_lock); |
4108 | if (!CIFS_CACHE_WRITE(cinode) && |
4109 | rsp->OplockLevel == SMB2_OPLOCK_LEVEL_NONE) |
4110 | cfile->oplock_break_cancelled = true; |
4111 | @@ -637,14 +637,14 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) |
4112 | clear_bit( |
4113 | CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, |
4114 | &cinode->flags); |
4115 | - |
4116 | + spin_unlock(&cfile->file_info_lock); |
4117 | queue_work(cifsiod_wq, &cfile->oplock_break); |
4118 | |
4119 | - spin_unlock(&cifs_file_list_lock); |
4120 | + spin_unlock(&tcon->open_file_lock); |
4121 | spin_unlock(&cifs_tcp_ses_lock); |
4122 | return true; |
4123 | } |
4124 | - spin_unlock(&cifs_file_list_lock); |
4125 | + spin_unlock(&tcon->open_file_lock); |
4126 | spin_unlock(&cifs_tcp_ses_lock); |
4127 | cifs_dbg(FYI, "No matching file for oplock break\n"); |
4128 | return true; |
4129 | diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c |
4130 | index d203c0329626..0e73cefca65e 100644 |
4131 | --- a/fs/cifs/smb2ops.c |
4132 | +++ b/fs/cifs/smb2ops.c |
4133 | @@ -287,7 +287,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon) |
4134 | cifs_dbg(FYI, "Link Speed %lld\n", |
4135 | le64_to_cpu(out_buf->LinkSpeed)); |
4136 | } |
4137 | - |
4138 | + kfree(out_buf); |
4139 | return rc; |
4140 | } |
4141 | #endif /* STATS2 */ |
4142 | @@ -541,6 +541,7 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock) |
4143 | server->ops->set_oplock_level(cinode, oplock, fid->epoch, |
4144 | &fid->purge_cache); |
4145 | cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode); |
4146 | + memcpy(cfile->fid.create_guid, fid->create_guid, 16); |
4147 | } |
4148 | |
4149 | static void |
4150 | @@ -699,6 +700,7 @@ smb2_clone_range(const unsigned int xid, |
4151 | |
4152 | cchunk_out: |
4153 | kfree(pcchunk); |
4154 | + kfree(retbuf); |
4155 | return rc; |
4156 | } |
4157 | |
4158 | @@ -823,7 +825,6 @@ smb2_duplicate_extents(const unsigned int xid, |
4159 | { |
4160 | int rc; |
4161 | unsigned int ret_data_len; |
4162 | - char *retbuf = NULL; |
4163 | struct duplicate_extents_to_file dup_ext_buf; |
4164 | struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink); |
4165 | |
4166 | @@ -849,7 +850,7 @@ smb2_duplicate_extents(const unsigned int xid, |
4167 | FSCTL_DUPLICATE_EXTENTS_TO_FILE, |
4168 | true /* is_fsctl */, (char *)&dup_ext_buf, |
4169 | sizeof(struct duplicate_extents_to_file), |
4170 | - (char **)&retbuf, |
4171 | + NULL, |
4172 | &ret_data_len); |
4173 | |
4174 | if (ret_data_len > 0) |
4175 | @@ -872,7 +873,6 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, |
4176 | struct cifsFileInfo *cfile) |
4177 | { |
4178 | struct fsctl_set_integrity_information_req integr_info; |
4179 | - char *retbuf = NULL; |
4180 | unsigned int ret_data_len; |
4181 | |
4182 | integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED); |
4183 | @@ -884,7 +884,7 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon, |
4184 | FSCTL_SET_INTEGRITY_INFORMATION, |
4185 | true /* is_fsctl */, (char *)&integr_info, |
4186 | sizeof(struct fsctl_set_integrity_information_req), |
4187 | - (char **)&retbuf, |
4188 | + NULL, |
4189 | &ret_data_len); |
4190 | |
4191 | } |
4192 | @@ -1041,7 +1041,7 @@ smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid) |
4193 | static void |
4194 | smb2_new_lease_key(struct cifs_fid *fid) |
4195 | { |
4196 | - get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE); |
4197 | + generate_random_uuid(fid->lease_key); |
4198 | } |
4199 | |
4200 | #define SMB2_SYMLINK_STRUCT_SIZE \ |
4201 | diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c |
4202 | index 29e06db5f187..3eec96ca87d9 100644 |
4203 | --- a/fs/cifs/smb2pdu.c |
4204 | +++ b/fs/cifs/smb2pdu.c |
4205 | @@ -100,7 +100,21 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ , |
4206 | hdr->ProtocolId = SMB2_PROTO_NUMBER; |
4207 | hdr->StructureSize = cpu_to_le16(64); |
4208 | hdr->Command = smb2_cmd; |
4209 | - hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */ |
4210 | + if (tcon && tcon->ses && tcon->ses->server) { |
4211 | + struct TCP_Server_Info *server = tcon->ses->server; |
4212 | + |
4213 | + spin_lock(&server->req_lock); |
4214 | + /* Request up to 2 credits but don't go over the limit. */ |
4215 | + if (server->credits >= SMB2_MAX_CREDITS_AVAILABLE) |
4216 | + hdr->CreditRequest = cpu_to_le16(0); |
4217 | + else |
4218 | + hdr->CreditRequest = cpu_to_le16( |
4219 | + min_t(int, SMB2_MAX_CREDITS_AVAILABLE - |
4220 | + server->credits, 2)); |
4221 | + spin_unlock(&server->req_lock); |
4222 | + } else { |
4223 | + hdr->CreditRequest = cpu_to_le16(2); |
4224 | + } |
4225 | hdr->ProcessId = cpu_to_le32((__u16)current->tgid); |
4226 | |
4227 | if (!tcon) |
4228 | @@ -590,6 +604,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, |
4229 | char *security_blob = NULL; |
4230 | unsigned char *ntlmssp_blob = NULL; |
4231 | bool use_spnego = false; /* else use raw ntlmssp */ |
4232 | + u64 previous_session = ses->Suid; |
4233 | |
4234 | cifs_dbg(FYI, "Session Setup\n"); |
4235 | |
4236 | @@ -627,6 +642,10 @@ ssetup_ntlmssp_authenticate: |
4237 | return rc; |
4238 | |
4239 | req->hdr.SessionId = 0; /* First session, not a reauthenticate */ |
4240 | + |
4241 | + /* if reconnect, we need to send previous sess id, otherwise it is 0 */ |
4242 | + req->PreviousSessionId = previous_session; |
4243 | + |
4244 | req->Flags = 0; /* MBZ */ |
4245 | /* to enable echos and oplocks */ |
4246 | req->hdr.CreditRequest = cpu_to_le16(3); |
4247 | @@ -1164,7 +1183,7 @@ create_durable_v2_buf(struct cifs_fid *pfid) |
4248 | |
4249 | buf->dcontext.Timeout = 0; /* Should this be configurable by workload */ |
4250 | buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); |
4251 | - get_random_bytes(buf->dcontext.CreateGuid, 16); |
4252 | + generate_random_uuid(buf->dcontext.CreateGuid); |
4253 | memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16); |
4254 | |
4255 | /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */ |
4256 | @@ -2057,6 +2076,7 @@ smb2_async_readv(struct cifs_readdata *rdata) |
4257 | if (rdata->credits) { |
4258 | buf->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes, |
4259 | SMB2_MAX_BUFFER_SIZE)); |
4260 | + buf->CreditRequest = buf->CreditCharge; |
4261 | spin_lock(&server->req_lock); |
4262 | server->credits += rdata->credits - |
4263 | le16_to_cpu(buf->CreditCharge); |
4264 | @@ -2243,6 +2263,7 @@ smb2_async_writev(struct cifs_writedata *wdata, |
4265 | if (wdata->credits) { |
4266 | req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes, |
4267 | SMB2_MAX_BUFFER_SIZE)); |
4268 | + req->hdr.CreditRequest = req->hdr.CreditCharge; |
4269 | spin_lock(&server->req_lock); |
4270 | server->credits += wdata->credits - |
4271 | le16_to_cpu(req->hdr.CreditCharge); |
4272 | diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h |
4273 | index ff88d9feb01e..fd3709e8de33 100644 |
4274 | --- a/fs/cifs/smb2pdu.h |
4275 | +++ b/fs/cifs/smb2pdu.h |
4276 | @@ -276,7 +276,7 @@ struct smb2_sess_setup_req { |
4277 | __le32 Channel; |
4278 | __le16 SecurityBufferOffset; |
4279 | __le16 SecurityBufferLength; |
4280 | - __le64 PreviousSessionId; |
4281 | + __u64 PreviousSessionId; |
4282 | __u8 Buffer[1]; /* variable length GSS security buffer */ |
4283 | } __packed; |
4284 | |
4285 | diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c |
4286 | index c502c116924c..55d64fba1e87 100644 |
4287 | --- a/fs/crypto/crypto.c |
4288 | +++ b/fs/crypto/crypto.c |
4289 | @@ -152,7 +152,10 @@ static int do_page_crypto(struct inode *inode, |
4290 | struct page *src_page, struct page *dest_page, |
4291 | gfp_t gfp_flags) |
4292 | { |
4293 | - u8 xts_tweak[FS_XTS_TWEAK_SIZE]; |
4294 | + struct { |
4295 | + __le64 index; |
4296 | + u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)]; |
4297 | + } xts_tweak; |
4298 | struct skcipher_request *req = NULL; |
4299 | DECLARE_FS_COMPLETION_RESULT(ecr); |
4300 | struct scatterlist dst, src; |
4301 | @@ -172,17 +175,15 @@ static int do_page_crypto(struct inode *inode, |
4302 | req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, |
4303 | fscrypt_complete, &ecr); |
4304 | |
4305 | - BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index)); |
4306 | - memcpy(xts_tweak, &index, sizeof(index)); |
4307 | - memset(&xts_tweak[sizeof(index)], 0, |
4308 | - FS_XTS_TWEAK_SIZE - sizeof(index)); |
4309 | + BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE); |
4310 | + xts_tweak.index = cpu_to_le64(index); |
4311 | + memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding)); |
4312 | |
4313 | sg_init_table(&dst, 1); |
4314 | sg_set_page(&dst, dest_page, PAGE_SIZE, 0); |
4315 | sg_init_table(&src, 1); |
4316 | sg_set_page(&src, src_page, PAGE_SIZE, 0); |
4317 | - skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, |
4318 | - xts_tweak); |
4319 | + skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, &xts_tweak); |
4320 | if (rw == FS_DECRYPT) |
4321 | res = crypto_skcipher_decrypt(req); |
4322 | else |
4323 | diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c |
4324 | index ed115acb5dee..6865663aac69 100644 |
4325 | --- a/fs/crypto/policy.c |
4326 | +++ b/fs/crypto/policy.c |
4327 | @@ -109,6 +109,8 @@ int fscrypt_process_policy(struct file *filp, |
4328 | if (ret) |
4329 | return ret; |
4330 | |
4331 | + inode_lock(inode); |
4332 | + |
4333 | if (!inode_has_encryption_context(inode)) { |
4334 | if (!S_ISDIR(inode->i_mode)) |
4335 | ret = -EINVAL; |
4336 | @@ -127,6 +129,8 @@ int fscrypt_process_policy(struct file *filp, |
4337 | ret = -EINVAL; |
4338 | } |
4339 | |
4340 | + inode_unlock(inode); |
4341 | + |
4342 | mnt_drop_write_file(filp); |
4343 | return ret; |
4344 | } |
4345 | diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c |
4346 | index 73bcfd41f5f2..42145be5c6b4 100644 |
4347 | --- a/fs/ext4/sysfs.c |
4348 | +++ b/fs/ext4/sysfs.c |
4349 | @@ -223,14 +223,18 @@ static struct attribute *ext4_attrs[] = { |
4350 | EXT4_ATTR_FEATURE(lazy_itable_init); |
4351 | EXT4_ATTR_FEATURE(batched_discard); |
4352 | EXT4_ATTR_FEATURE(meta_bg_resize); |
4353 | +#ifdef CONFIG_EXT4_FS_ENCRYPTION |
4354 | EXT4_ATTR_FEATURE(encryption); |
4355 | +#endif |
4356 | EXT4_ATTR_FEATURE(metadata_csum_seed); |
4357 | |
4358 | static struct attribute *ext4_feat_attrs[] = { |
4359 | ATTR_LIST(lazy_itable_init), |
4360 | ATTR_LIST(batched_discard), |
4361 | ATTR_LIST(meta_bg_resize), |
4362 | +#ifdef CONFIG_EXT4_FS_ENCRYPTION |
4363 | ATTR_LIST(encryption), |
4364 | +#endif |
4365 | ATTR_LIST(metadata_csum_seed), |
4366 | NULL, |
4367 | }; |
4368 | diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c |
4369 | index ad0c745ebad7..871c8b392099 100644 |
4370 | --- a/fs/isofs/inode.c |
4371 | +++ b/fs/isofs/inode.c |
4372 | @@ -687,6 +687,11 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent) |
4373 | pri_bh = NULL; |
4374 | |
4375 | root_found: |
4376 | + /* We don't support read-write mounts */ |
4377 | + if (!(s->s_flags & MS_RDONLY)) { |
4378 | + error = -EACCES; |
4379 | + goto out_freebh; |
4380 | + } |
4381 | |
4382 | if (joliet_level && (pri == NULL || !opt.rock)) { |
4383 | /* This is the case of Joliet with the norock mount flag. |
4384 | @@ -1501,9 +1506,6 @@ struct inode *__isofs_iget(struct super_block *sb, |
4385 | static struct dentry *isofs_mount(struct file_system_type *fs_type, |
4386 | int flags, const char *dev_name, void *data) |
4387 | { |
4388 | - /* We don't support read-write mounts */ |
4389 | - if (!(flags & MS_RDONLY)) |
4390 | - return ERR_PTR(-EACCES); |
4391 | return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super); |
4392 | } |
4393 | |
4394 | diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c |
4395 | index 3d8246a9faa4..e1652665bd93 100644 |
4396 | --- a/fs/jbd2/transaction.c |
4397 | +++ b/fs/jbd2/transaction.c |
4398 | @@ -1149,6 +1149,7 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh) |
4399 | JBUFFER_TRACE(jh, "file as BJ_Reserved"); |
4400 | spin_lock(&journal->j_list_lock); |
4401 | __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved); |
4402 | + spin_unlock(&journal->j_list_lock); |
4403 | } else if (jh->b_transaction == journal->j_committing_transaction) { |
4404 | /* first access by this transaction */ |
4405 | jh->b_modified = 0; |
4406 | @@ -1156,8 +1157,8 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh) |
4407 | JBUFFER_TRACE(jh, "set next transaction"); |
4408 | spin_lock(&journal->j_list_lock); |
4409 | jh->b_next_transaction = transaction; |
4410 | + spin_unlock(&journal->j_list_lock); |
4411 | } |
4412 | - spin_unlock(&journal->j_list_lock); |
4413 | jbd_unlock_bh_state(bh); |
4414 | |
4415 | /* |
4416 | diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c |
4417 | index 217847679f0e..2905479f214a 100644 |
4418 | --- a/fs/nfs/blocklayout/blocklayout.c |
4419 | +++ b/fs/nfs/blocklayout/blocklayout.c |
4420 | @@ -344,9 +344,10 @@ static void bl_write_cleanup(struct work_struct *work) |
4421 | u64 start = hdr->args.offset & (loff_t)PAGE_MASK; |
4422 | u64 end = (hdr->args.offset + hdr->args.count + |
4423 | PAGE_SIZE - 1) & (loff_t)PAGE_MASK; |
4424 | + u64 lwb = hdr->args.offset + hdr->args.count; |
4425 | |
4426 | ext_tree_mark_written(bl, start >> SECTOR_SHIFT, |
4427 | - (end - start) >> SECTOR_SHIFT, end); |
4428 | + (end - start) >> SECTOR_SHIFT, lwb); |
4429 | } |
4430 | |
4431 | pnfs_ld_write_done(hdr); |
4432 | diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c |
4433 | index 322c2585bc34..b9c65421ed81 100644 |
4434 | --- a/fs/nfs/delegation.c |
4435 | +++ b/fs/nfs/delegation.c |
4436 | @@ -41,6 +41,17 @@ void nfs_mark_delegation_referenced(struct nfs_delegation *delegation) |
4437 | set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags); |
4438 | } |
4439 | |
4440 | +static bool |
4441 | +nfs4_is_valid_delegation(const struct nfs_delegation *delegation, |
4442 | + fmode_t flags) |
4443 | +{ |
4444 | + if (delegation != NULL && (delegation->type & flags) == flags && |
4445 | + !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) && |
4446 | + !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) |
4447 | + return true; |
4448 | + return false; |
4449 | +} |
4450 | + |
4451 | static int |
4452 | nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark) |
4453 | { |
4454 | @@ -50,8 +61,7 @@ nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark) |
4455 | flags &= FMODE_READ|FMODE_WRITE; |
4456 | rcu_read_lock(); |
4457 | delegation = rcu_dereference(NFS_I(inode)->delegation); |
4458 | - if (delegation != NULL && (delegation->type & flags) == flags && |
4459 | - !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) { |
4460 | + if (nfs4_is_valid_delegation(delegation, flags)) { |
4461 | if (mark) |
4462 | nfs_mark_delegation_referenced(delegation); |
4463 | ret = 1; |
4464 | @@ -893,7 +903,7 @@ bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, |
4465 | flags &= FMODE_READ|FMODE_WRITE; |
4466 | rcu_read_lock(); |
4467 | delegation = rcu_dereference(nfsi->delegation); |
4468 | - ret = (delegation != NULL && (delegation->type & flags) == flags); |
4469 | + ret = nfs4_is_valid_delegation(delegation, flags); |
4470 | if (ret) { |
4471 | nfs4_stateid_copy(dst, &delegation->stateid); |
4472 | nfs_mark_delegation_referenced(delegation); |
4473 | diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c |
4474 | index 177fefb26c18..6bc5a68e39f1 100644 |
4475 | --- a/fs/nfs/dir.c |
4476 | +++ b/fs/nfs/dir.c |
4477 | @@ -435,11 +435,11 @@ int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry) |
4478 | return 0; |
4479 | |
4480 | nfsi = NFS_I(inode); |
4481 | - if (entry->fattr->fileid == nfsi->fileid) |
4482 | - return 1; |
4483 | - if (nfs_compare_fh(entry->fh, &nfsi->fh) == 0) |
4484 | - return 1; |
4485 | - return 0; |
4486 | + if (entry->fattr->fileid != nfsi->fileid) |
4487 | + return 0; |
4488 | + if (entry->fh->size && nfs_compare_fh(entry->fh, &nfsi->fh) != 0) |
4489 | + return 0; |
4490 | + return 1; |
4491 | } |
4492 | |
4493 | static |
4494 | @@ -517,6 +517,8 @@ again: |
4495 | &entry->fattr->fsid)) |
4496 | goto out; |
4497 | if (nfs_same_file(dentry, entry)) { |
4498 | + if (!entry->fh->size) |
4499 | + goto out; |
4500 | nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); |
4501 | status = nfs_refresh_inode(d_inode(dentry), entry->fattr); |
4502 | if (!status) |
4503 | @@ -529,6 +531,10 @@ again: |
4504 | goto again; |
4505 | } |
4506 | } |
4507 | + if (!entry->fh->size) { |
4508 | + d_lookup_done(dentry); |
4509 | + goto out; |
4510 | + } |
4511 | |
4512 | inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr, entry->label); |
4513 | alias = d_splice_alias(inode, dentry); |
4514 | diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c |
4515 | index 64b43b4ad9dd..608501971fe0 100644 |
4516 | --- a/fs/nfs/nfs42proc.c |
4517 | +++ b/fs/nfs/nfs42proc.c |
4518 | @@ -443,6 +443,7 @@ int nfs42_proc_layoutstats_generic(struct nfs_server *server, |
4519 | task = rpc_run_task(&task_setup); |
4520 | if (IS_ERR(task)) |
4521 | return PTR_ERR(task); |
4522 | + rpc_put_task(task); |
4523 | return 0; |
4524 | } |
4525 | |
4526 | diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c |
4527 | index cada00aa5096..8353f33f0466 100644 |
4528 | --- a/fs/nfs/nfs4state.c |
4529 | +++ b/fs/nfs/nfs4state.c |
4530 | @@ -1498,6 +1498,9 @@ restart: |
4531 | __func__, status); |
4532 | case -ENOENT: |
4533 | case -ENOMEM: |
4534 | + case -EACCES: |
4535 | + case -EROFS: |
4536 | + case -EIO: |
4537 | case -ESTALE: |
4538 | /* Open state on this file cannot be recovered */ |
4539 | nfs4_state_mark_recovery_failed(state, status); |
4540 | diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c |
4541 | index 45007acaf364..a2b65fc56dd6 100644 |
4542 | --- a/fs/nfsd/nfssvc.c |
4543 | +++ b/fs/nfsd/nfssvc.c |
4544 | @@ -366,14 +366,21 @@ static struct notifier_block nfsd_inet6addr_notifier = { |
4545 | }; |
4546 | #endif |
4547 | |
4548 | +/* Only used under nfsd_mutex, so this atomic may be overkill: */ |
4549 | +static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0); |
4550 | + |
4551 | static void nfsd_last_thread(struct svc_serv *serv, struct net *net) |
4552 | { |
4553 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); |
4554 | |
4555 | - unregister_inetaddr_notifier(&nfsd_inetaddr_notifier); |
4556 | + /* check if the notifier still has clients */ |
4557 | + if (atomic_dec_return(&nfsd_notifier_refcount) == 0) { |
4558 | + unregister_inetaddr_notifier(&nfsd_inetaddr_notifier); |
4559 | #if IS_ENABLED(CONFIG_IPV6) |
4560 | - unregister_inet6addr_notifier(&nfsd_inet6addr_notifier); |
4561 | + unregister_inet6addr_notifier(&nfsd_inet6addr_notifier); |
4562 | #endif |
4563 | + } |
4564 | + |
4565 | /* |
4566 | * write_ports can create the server without actually starting |
4567 | * any threads--if we get shut down before any threads are |
4568 | @@ -488,10 +495,13 @@ int nfsd_create_serv(struct net *net) |
4569 | } |
4570 | |
4571 | set_max_drc(); |
4572 | - register_inetaddr_notifier(&nfsd_inetaddr_notifier); |
4573 | + /* check if the notifier is already set */ |
4574 | + if (atomic_inc_return(&nfsd_notifier_refcount) == 1) { |
4575 | + register_inetaddr_notifier(&nfsd_inetaddr_notifier); |
4576 | #if IS_ENABLED(CONFIG_IPV6) |
4577 | - register_inet6addr_notifier(&nfsd_inet6addr_notifier); |
4578 | + register_inet6addr_notifier(&nfsd_inet6addr_notifier); |
4579 | #endif |
4580 | + } |
4581 | do_gettimeofday(&nn->nfssvc_boot); /* record boot time */ |
4582 | return 0; |
4583 | } |
4584 | diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c |
4585 | index 43fdc2765aea..abadbc30e013 100644 |
4586 | --- a/fs/overlayfs/copy_up.c |
4587 | +++ b/fs/overlayfs/copy_up.c |
4588 | @@ -57,6 +57,7 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new) |
4589 | ssize_t list_size, size, value_size = 0; |
4590 | char *buf, *name, *value = NULL; |
4591 | int uninitialized_var(error); |
4592 | + size_t slen; |
4593 | |
4594 | if (!old->d_inode->i_op->getxattr || |
4595 | !new->d_inode->i_op->getxattr) |
4596 | @@ -79,7 +80,16 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new) |
4597 | goto out; |
4598 | } |
4599 | |
4600 | - for (name = buf; name < (buf + list_size); name += strlen(name) + 1) { |
4601 | + for (name = buf; list_size; name += slen) { |
4602 | + slen = strnlen(name, list_size) + 1; |
4603 | + |
4604 | + /* underlying fs providing us with an broken xattr list? */ |
4605 | + if (WARN_ON(slen > list_size)) { |
4606 | + error = -EIO; |
4607 | + break; |
4608 | + } |
4609 | + list_size -= slen; |
4610 | + |
4611 | if (ovl_is_private_xattr(name)) |
4612 | continue; |
4613 | retry: |
4614 | diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c |
4615 | index 1560fdc09a5f..74e696426aae 100644 |
4616 | --- a/fs/overlayfs/dir.c |
4617 | +++ b/fs/overlayfs/dir.c |
4618 | @@ -14,6 +14,7 @@ |
4619 | #include <linux/cred.h> |
4620 | #include <linux/posix_acl.h> |
4621 | #include <linux/posix_acl_xattr.h> |
4622 | +#include <linux/atomic.h> |
4623 | #include "overlayfs.h" |
4624 | |
4625 | void ovl_cleanup(struct inode *wdir, struct dentry *wdentry) |
4626 | @@ -37,8 +38,10 @@ struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry) |
4627 | { |
4628 | struct dentry *temp; |
4629 | char name[20]; |
4630 | + static atomic_t temp_id = ATOMIC_INIT(0); |
4631 | |
4632 | - snprintf(name, sizeof(name), "#%lx", (unsigned long) dentry); |
4633 | + /* counter is allowed to wrap, since temp dentries are ephemeral */ |
4634 | + snprintf(name, sizeof(name), "#%x", atomic_inc_return(&temp_id)); |
4635 | |
4636 | temp = lookup_one_len(name, workdir, strlen(name)); |
4637 | if (!IS_ERR(temp) && temp->d_inode) { |
4638 | diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c |
4639 | index 7a034d62cf8c..2340262a7e97 100644 |
4640 | --- a/fs/pstore/ram.c |
4641 | +++ b/fs/pstore/ram.c |
4642 | @@ -377,13 +377,14 @@ static void ramoops_free_przs(struct ramoops_context *cxt) |
4643 | { |
4644 | int i; |
4645 | |
4646 | - cxt->max_dump_cnt = 0; |
4647 | if (!cxt->przs) |
4648 | return; |
4649 | |
4650 | - for (i = 0; !IS_ERR_OR_NULL(cxt->przs[i]); i++) |
4651 | + for (i = 0; i < cxt->max_dump_cnt; i++) |
4652 | persistent_ram_free(cxt->przs[i]); |
4653 | + |
4654 | kfree(cxt->przs); |
4655 | + cxt->max_dump_cnt = 0; |
4656 | } |
4657 | |
4658 | static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt, |
4659 | @@ -408,7 +409,7 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt, |
4660 | GFP_KERNEL); |
4661 | if (!cxt->przs) { |
4662 | dev_err(dev, "failed to initialize a prz array for dumps\n"); |
4663 | - goto fail_prz; |
4664 | + goto fail_mem; |
4665 | } |
4666 | |
4667 | for (i = 0; i < cxt->max_dump_cnt; i++) { |
4668 | @@ -419,6 +420,11 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt, |
4669 | err = PTR_ERR(cxt->przs[i]); |
4670 | dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n", |
4671 | cxt->record_size, (unsigned long long)*paddr, err); |
4672 | + |
4673 | + while (i > 0) { |
4674 | + i--; |
4675 | + persistent_ram_free(cxt->przs[i]); |
4676 | + } |
4677 | goto fail_prz; |
4678 | } |
4679 | *paddr += cxt->record_size; |
4680 | @@ -426,7 +432,9 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt, |
4681 | |
4682 | return 0; |
4683 | fail_prz: |
4684 | - ramoops_free_przs(cxt); |
4685 | + kfree(cxt->przs); |
4686 | +fail_mem: |
4687 | + cxt->max_dump_cnt = 0; |
4688 | return err; |
4689 | } |
4690 | |
4691 | @@ -659,7 +667,6 @@ static int ramoops_remove(struct platform_device *pdev) |
4692 | struct ramoops_context *cxt = &oops_cxt; |
4693 | |
4694 | pstore_unregister(&cxt->pstore); |
4695 | - cxt->max_dump_cnt = 0; |
4696 | |
4697 | kfree(cxt->pstore.buf); |
4698 | cxt->pstore.bufsize = 0; |
4699 | diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c |
4700 | index 76c3f80efdfa..364d2dffe5a6 100644 |
4701 | --- a/fs/pstore/ram_core.c |
4702 | +++ b/fs/pstore/ram_core.c |
4703 | @@ -47,43 +47,10 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz) |
4704 | return atomic_read(&prz->buffer->start); |
4705 | } |
4706 | |
4707 | -/* increase and wrap the start pointer, returning the old value */ |
4708 | -static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a) |
4709 | -{ |
4710 | - int old; |
4711 | - int new; |
4712 | - |
4713 | - do { |
4714 | - old = atomic_read(&prz->buffer->start); |
4715 | - new = old + a; |
4716 | - while (unlikely(new >= prz->buffer_size)) |
4717 | - new -= prz->buffer_size; |
4718 | - } while (atomic_cmpxchg(&prz->buffer->start, old, new) != old); |
4719 | - |
4720 | - return old; |
4721 | -} |
4722 | - |
4723 | -/* increase the size counter until it hits the max size */ |
4724 | -static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a) |
4725 | -{ |
4726 | - size_t old; |
4727 | - size_t new; |
4728 | - |
4729 | - if (atomic_read(&prz->buffer->size) == prz->buffer_size) |
4730 | - return; |
4731 | - |
4732 | - do { |
4733 | - old = atomic_read(&prz->buffer->size); |
4734 | - new = old + a; |
4735 | - if (new > prz->buffer_size) |
4736 | - new = prz->buffer_size; |
4737 | - } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old); |
4738 | -} |
4739 | - |
4740 | static DEFINE_RAW_SPINLOCK(buffer_lock); |
4741 | |
4742 | /* increase and wrap the start pointer, returning the old value */ |
4743 | -static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a) |
4744 | +static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a) |
4745 | { |
4746 | int old; |
4747 | int new; |
4748 | @@ -103,7 +70,7 @@ static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a) |
4749 | } |
4750 | |
4751 | /* increase the size counter until it hits the max size */ |
4752 | -static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a) |
4753 | +static void buffer_size_add(struct persistent_ram_zone *prz, size_t a) |
4754 | { |
4755 | size_t old; |
4756 | size_t new; |
4757 | @@ -124,9 +91,6 @@ exit: |
4758 | raw_spin_unlock_irqrestore(&buffer_lock, flags); |
4759 | } |
4760 | |
4761 | -static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic; |
4762 | -static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic; |
4763 | - |
4764 | static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz, |
4765 | uint8_t *data, size_t len, uint8_t *ecc) |
4766 | { |
4767 | @@ -299,7 +263,7 @@ static void notrace persistent_ram_update(struct persistent_ram_zone *prz, |
4768 | const void *s, unsigned int start, unsigned int count) |
4769 | { |
4770 | struct persistent_ram_buffer *buffer = prz->buffer; |
4771 | - memcpy(buffer->data + start, s, count); |
4772 | + memcpy_toio(buffer->data + start, s, count); |
4773 | persistent_ram_update_ecc(prz, start, count); |
4774 | } |
4775 | |
4776 | @@ -322,8 +286,8 @@ void persistent_ram_save_old(struct persistent_ram_zone *prz) |
4777 | } |
4778 | |
4779 | prz->old_log_size = size; |
4780 | - memcpy(prz->old_log, &buffer->data[start], size - start); |
4781 | - memcpy(prz->old_log + size - start, &buffer->data[0], start); |
4782 | + memcpy_fromio(prz->old_log, &buffer->data[start], size - start); |
4783 | + memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start); |
4784 | } |
4785 | |
4786 | int notrace persistent_ram_write(struct persistent_ram_zone *prz, |
4787 | @@ -426,9 +390,6 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size, |
4788 | return NULL; |
4789 | } |
4790 | |
4791 | - buffer_start_add = buffer_start_add_locked; |
4792 | - buffer_size_add = buffer_size_add_locked; |
4793 | - |
4794 | if (memtype) |
4795 | va = ioremap(start, size); |
4796 | else |
4797 | diff --git a/fs/super.c b/fs/super.c |
4798 | index c2ff475c1711..47d11e0462d0 100644 |
4799 | --- a/fs/super.c |
4800 | +++ b/fs/super.c |
4801 | @@ -1379,8 +1379,8 @@ int freeze_super(struct super_block *sb) |
4802 | } |
4803 | } |
4804 | /* |
4805 | - * This is just for debugging purposes so that fs can warn if it |
4806 | - * sees write activity when frozen is set to SB_FREEZE_COMPLETE. |
4807 | + * For debugging purposes so that fs can warn if it sees write activity |
4808 | + * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super(). |
4809 | */ |
4810 | sb->s_writers.frozen = SB_FREEZE_COMPLETE; |
4811 | up_write(&sb->s_umount); |
4812 | @@ -1399,7 +1399,7 @@ int thaw_super(struct super_block *sb) |
4813 | int error; |
4814 | |
4815 | down_write(&sb->s_umount); |
4816 | - if (sb->s_writers.frozen == SB_UNFROZEN) { |
4817 | + if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) { |
4818 | up_write(&sb->s_umount); |
4819 | return -EINVAL; |
4820 | } |
4821 | diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c |
4822 | index 11a004114eba..c9ee6f6efa07 100644 |
4823 | --- a/fs/ubifs/xattr.c |
4824 | +++ b/fs/ubifs/xattr.c |
4825 | @@ -172,6 +172,7 @@ out_cancel: |
4826 | host_ui->xattr_cnt -= 1; |
4827 | host_ui->xattr_size -= CALC_DENT_SIZE(nm->len); |
4828 | host_ui->xattr_size -= CALC_XATTR_BYTES(size); |
4829 | + host_ui->xattr_names -= nm->len; |
4830 | mutex_unlock(&host_ui->ui_mutex); |
4831 | out_free: |
4832 | make_bad_inode(inode); |
4833 | @@ -476,6 +477,7 @@ out_cancel: |
4834 | host_ui->xattr_cnt += 1; |
4835 | host_ui->xattr_size += CALC_DENT_SIZE(nm->len); |
4836 | host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len); |
4837 | + host_ui->xattr_names += nm->len; |
4838 | mutex_unlock(&host_ui->ui_mutex); |
4839 | ubifs_release_budget(c, &req); |
4840 | make_bad_inode(inode); |
4841 | diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h |
4842 | index 29050337d9d5..da59fd9cdb5e 100644 |
4843 | --- a/include/dt-bindings/clock/imx6qdl-clock.h |
4844 | +++ b/include/dt-bindings/clock/imx6qdl-clock.h |
4845 | @@ -269,6 +269,8 @@ |
4846 | #define IMX6QDL_CLK_PRG0_APB 256 |
4847 | #define IMX6QDL_CLK_PRG1_APB 257 |
4848 | #define IMX6QDL_CLK_PRE_AXI 258 |
4849 | -#define IMX6QDL_CLK_END 259 |
4850 | +#define IMX6QDL_CLK_MLB_SEL 259 |
4851 | +#define IMX6QDL_CLK_MLB_PODF 260 |
4852 | +#define IMX6QDL_CLK_END 261 |
4853 | |
4854 | #endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */ |
4855 | diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h |
4856 | index 631ba33bbe9f..32dc0cbd51ca 100644 |
4857 | --- a/include/linux/cpufreq.h |
4858 | +++ b/include/linux/cpufreq.h |
4859 | @@ -639,19 +639,19 @@ static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy, |
4860 | unsigned int target_freq) |
4861 | { |
4862 | struct cpufreq_frequency_table *table = policy->freq_table; |
4863 | + struct cpufreq_frequency_table *pos, *best = table - 1; |
4864 | unsigned int freq; |
4865 | - int i, best = -1; |
4866 | |
4867 | - for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { |
4868 | - freq = table[i].frequency; |
4869 | + cpufreq_for_each_valid_entry(pos, table) { |
4870 | + freq = pos->frequency; |
4871 | |
4872 | if (freq >= target_freq) |
4873 | - return i; |
4874 | + return pos - table; |
4875 | |
4876 | - best = i; |
4877 | + best = pos; |
4878 | } |
4879 | |
4880 | - return best; |
4881 | + return best - table; |
4882 | } |
4883 | |
4884 | /* Find lowest freq at or above target in a table in descending order */ |
4885 | @@ -659,28 +659,28 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy, |
4886 | unsigned int target_freq) |
4887 | { |
4888 | struct cpufreq_frequency_table *table = policy->freq_table; |
4889 | + struct cpufreq_frequency_table *pos, *best = table - 1; |
4890 | unsigned int freq; |
4891 | - int i, best = -1; |
4892 | |
4893 | - for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { |
4894 | - freq = table[i].frequency; |
4895 | + cpufreq_for_each_valid_entry(pos, table) { |
4896 | + freq = pos->frequency; |
4897 | |
4898 | if (freq == target_freq) |
4899 | - return i; |
4900 | + return pos - table; |
4901 | |
4902 | if (freq > target_freq) { |
4903 | - best = i; |
4904 | + best = pos; |
4905 | continue; |
4906 | } |
4907 | |
4908 | /* No freq found above target_freq */ |
4909 | - if (best == -1) |
4910 | - return i; |
4911 | + if (best == table - 1) |
4912 | + return pos - table; |
4913 | |
4914 | - return best; |
4915 | + return best - table; |
4916 | } |
4917 | |
4918 | - return best; |
4919 | + return best - table; |
4920 | } |
4921 | |
4922 | /* Works only on sorted freq-tables */ |
4923 | @@ -700,28 +700,28 @@ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy, |
4924 | unsigned int target_freq) |
4925 | { |
4926 | struct cpufreq_frequency_table *table = policy->freq_table; |
4927 | + struct cpufreq_frequency_table *pos, *best = table - 1; |
4928 | unsigned int freq; |
4929 | - int i, best = -1; |
4930 | |
4931 | - for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { |
4932 | - freq = table[i].frequency; |
4933 | + cpufreq_for_each_valid_entry(pos, table) { |
4934 | + freq = pos->frequency; |
4935 | |
4936 | if (freq == target_freq) |
4937 | - return i; |
4938 | + return pos - table; |
4939 | |
4940 | if (freq < target_freq) { |
4941 | - best = i; |
4942 | + best = pos; |
4943 | continue; |
4944 | } |
4945 | |
4946 | /* No freq found below target_freq */ |
4947 | - if (best == -1) |
4948 | - return i; |
4949 | + if (best == table - 1) |
4950 | + return pos - table; |
4951 | |
4952 | - return best; |
4953 | + return best - table; |
4954 | } |
4955 | |
4956 | - return best; |
4957 | + return best - table; |
4958 | } |
4959 | |
4960 | /* Find highest freq at or below target in a table in descending order */ |
4961 | @@ -729,19 +729,19 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy, |
4962 | unsigned int target_freq) |
4963 | { |
4964 | struct cpufreq_frequency_table *table = policy->freq_table; |
4965 | + struct cpufreq_frequency_table *pos, *best = table - 1; |
4966 | unsigned int freq; |
4967 | - int i, best = -1; |
4968 | |
4969 | - for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { |
4970 | - freq = table[i].frequency; |
4971 | + cpufreq_for_each_valid_entry(pos, table) { |
4972 | + freq = pos->frequency; |
4973 | |
4974 | if (freq <= target_freq) |
4975 | - return i; |
4976 | + return pos - table; |
4977 | |
4978 | - best = i; |
4979 | + best = pos; |
4980 | } |
4981 | |
4982 | - return best; |
4983 | + return best - table; |
4984 | } |
4985 | |
4986 | /* Works only on sorted freq-tables */ |
4987 | @@ -761,32 +761,32 @@ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy, |
4988 | unsigned int target_freq) |
4989 | { |
4990 | struct cpufreq_frequency_table *table = policy->freq_table; |
4991 | + struct cpufreq_frequency_table *pos, *best = table - 1; |
4992 | unsigned int freq; |
4993 | - int i, best = -1; |
4994 | |
4995 | - for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { |
4996 | - freq = table[i].frequency; |
4997 | + cpufreq_for_each_valid_entry(pos, table) { |
4998 | + freq = pos->frequency; |
4999 | |
5000 | if (freq == target_freq) |
5001 | - return i; |
5002 | + return pos - table; |
5003 | |
5004 | if (freq < target_freq) { |
5005 | - best = i; |
5006 | + best = pos; |
5007 | continue; |
5008 | } |
5009 | |
5010 | /* No freq found below target_freq */ |
5011 | - if (best == -1) |
5012 | - return i; |
5013 | + if (best == table - 1) |
5014 | + return pos - table; |
5015 | |
5016 | /* Choose the closest freq */ |
5017 | - if (target_freq - table[best].frequency > freq - target_freq) |
5018 | - return i; |
5019 | + if (target_freq - best->frequency > freq - target_freq) |
5020 | + return pos - table; |
5021 | |
5022 | - return best; |
5023 | + return best - table; |
5024 | } |
5025 | |
5026 | - return best; |
5027 | + return best - table; |
5028 | } |
5029 | |
5030 | /* Find closest freq to target in a table in descending order */ |
5031 | @@ -794,32 +794,32 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy, |
5032 | unsigned int target_freq) |
5033 | { |
5034 | struct cpufreq_frequency_table *table = policy->freq_table; |
5035 | + struct cpufreq_frequency_table *pos, *best = table - 1; |
5036 | unsigned int freq; |
5037 | - int i, best = -1; |
5038 | |
5039 | - for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { |
5040 | - freq = table[i].frequency; |
5041 | + cpufreq_for_each_valid_entry(pos, table) { |
5042 | + freq = pos->frequency; |
5043 | |
5044 | if (freq == target_freq) |
5045 | - return i; |
5046 | + return pos - table; |
5047 | |
5048 | if (freq > target_freq) { |
5049 | - best = i; |
5050 | + best = pos; |
5051 | continue; |
5052 | } |
5053 | |
5054 | /* No freq found above target_freq */ |
5055 | - if (best == -1) |
5056 | - return i; |
5057 | + if (best == table - 1) |
5058 | + return pos - table; |
5059 | |
5060 | /* Choose the closest freq */ |
5061 | - if (table[best].frequency - target_freq > target_freq - freq) |
5062 | - return i; |
5063 | + if (best->frequency - target_freq > target_freq - freq) |
5064 | + return pos - table; |
5065 | |
5066 | - return best; |
5067 | + return best - table; |
5068 | } |
5069 | |
5070 | - return best; |
5071 | + return best - table; |
5072 | } |
5073 | |
5074 | /* Works only on sorted freq-tables */ |
5075 | diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h |
5076 | index 0a83a1e648b0..4db00b02ca3f 100644 |
5077 | --- a/include/linux/devfreq-event.h |
5078 | +++ b/include/linux/devfreq-event.h |
5079 | @@ -148,11 +148,6 @@ static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev) |
5080 | return -EINVAL; |
5081 | } |
5082 | |
5083 | -static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev) |
5084 | -{ |
5085 | - return ERR_PTR(-EINVAL); |
5086 | -} |
5087 | - |
5088 | static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle( |
5089 | struct device *dev, int index) |
5090 | { |
5091 | diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h |
5092 | index 99ac022edc60..3a8610ea6ab7 100644 |
5093 | --- a/include/linux/irqchip/arm-gic-v3.h |
5094 | +++ b/include/linux/irqchip/arm-gic-v3.h |
5095 | @@ -290,7 +290,7 @@ |
5096 | #define GITS_BASER_TYPE_SHIFT (56) |
5097 | #define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) |
5098 | #define GITS_BASER_ENTRY_SIZE_SHIFT (48) |
5099 | -#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1) |
5100 | +#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1) |
5101 | #define GITS_BASER_SHAREABILITY_SHIFT (10) |
5102 | #define GITS_BASER_InnerShareable \ |
5103 | GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) |
5104 | diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h |
5105 | index fb8e3b6febdf..c2119008990a 100644 |
5106 | --- a/include/target/target_core_base.h |
5107 | +++ b/include/target/target_core_base.h |
5108 | @@ -177,6 +177,7 @@ enum tcm_sense_reason_table { |
5109 | TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED = R(0x15), |
5110 | TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16), |
5111 | TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17), |
5112 | + TCM_COPY_TARGET_DEVICE_NOT_REACHABLE = R(0x18), |
5113 | #undef R |
5114 | }; |
5115 | |
5116 | diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c |
5117 | index 039de34f1521..8b3610c871f2 100644 |
5118 | --- a/kernel/sched/fair.c |
5119 | +++ b/kernel/sched/fair.c |
5120 | @@ -456,17 +456,23 @@ static inline int entity_before(struct sched_entity *a, |
5121 | |
5122 | static void update_min_vruntime(struct cfs_rq *cfs_rq) |
5123 | { |
5124 | + struct sched_entity *curr = cfs_rq->curr; |
5125 | + |
5126 | u64 vruntime = cfs_rq->min_vruntime; |
5127 | |
5128 | - if (cfs_rq->curr) |
5129 | - vruntime = cfs_rq->curr->vruntime; |
5130 | + if (curr) { |
5131 | + if (curr->on_rq) |
5132 | + vruntime = curr->vruntime; |
5133 | + else |
5134 | + curr = NULL; |
5135 | + } |
5136 | |
5137 | if (cfs_rq->rb_leftmost) { |
5138 | struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost, |
5139 | struct sched_entity, |
5140 | run_node); |
5141 | |
5142 | - if (!cfs_rq->curr) |
5143 | + if (!curr) |
5144 | vruntime = se->vruntime; |
5145 | else |
5146 | vruntime = min_vruntime(vruntime, se->vruntime); |
5147 | @@ -680,7 +686,14 @@ void init_entity_runnable_average(struct sched_entity *se) |
5148 | * will definitely be update (after enqueue). |
5149 | */ |
5150 | sa->period_contrib = 1023; |
5151 | - sa->load_avg = scale_load_down(se->load.weight); |
5152 | + /* |
5153 | + * Tasks are intialized with full load to be seen as heavy tasks until |
5154 | + * they get a chance to stabilize to their real load level. |
5155 | + * Group entities are intialized with zero load to reflect the fact that |
5156 | + * nothing has been attached to the task group yet. |
5157 | + */ |
5158 | + if (entity_is_task(se)) |
5159 | + sa->load_avg = scale_load_down(se->load.weight); |
5160 | sa->load_sum = sa->load_avg * LOAD_AVG_MAX; |
5161 | /* |
5162 | * At this point, util_avg won't be used in select_task_rq_fair anyway |
5163 | @@ -3459,9 +3472,10 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) |
5164 | account_entity_dequeue(cfs_rq, se); |
5165 | |
5166 | /* |
5167 | - * Normalize the entity after updating the min_vruntime because the |
5168 | - * update can refer to the ->curr item and we need to reflect this |
5169 | - * movement in our normalized position. |
5170 | + * Normalize after update_curr(); which will also have moved |
5171 | + * min_vruntime if @se is the one holding it back. But before doing |
5172 | + * update_min_vruntime() again, which will discount @se's position and |
5173 | + * can move min_vruntime forward still more. |
5174 | */ |
5175 | if (!(flags & DEQUEUE_SLEEP)) |
5176 | se->vruntime -= cfs_rq->min_vruntime; |
5177 | @@ -3469,8 +3483,16 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) |
5178 | /* return excess runtime on last dequeue */ |
5179 | return_cfs_rq_runtime(cfs_rq); |
5180 | |
5181 | - update_min_vruntime(cfs_rq); |
5182 | update_cfs_shares(cfs_rq); |
5183 | + |
5184 | + /* |
5185 | + * Now advance min_vruntime if @se was the entity holding it back, |
5186 | + * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be |
5187 | + * put back on, and if we advance min_vruntime, we'll be placed back |
5188 | + * further than we started -- ie. we'll be penalized. |
5189 | + */ |
5190 | + if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE) |
5191 | + update_min_vruntime(cfs_rq); |
5192 | } |
5193 | |
5194 | /* |
5195 | diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c |
5196 | index bf168838a029..e72581da9648 100644 |
5197 | --- a/net/sunrpc/xprtsock.c |
5198 | +++ b/net/sunrpc/xprtsock.c |
5199 | @@ -473,7 +473,16 @@ static int xs_nospace(struct rpc_task *task) |
5200 | spin_unlock_bh(&xprt->transport_lock); |
5201 | |
5202 | /* Race breaker in case memory is freed before above code is called */ |
5203 | - sk->sk_write_space(sk); |
5204 | + if (ret == -EAGAIN) { |
5205 | + struct socket_wq *wq; |
5206 | + |
5207 | + rcu_read_lock(); |
5208 | + wq = rcu_dereference(sk->sk_wq); |
5209 | + set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags); |
5210 | + rcu_read_unlock(); |
5211 | + |
5212 | + sk->sk_write_space(sk); |
5213 | + } |
5214 | return ret; |
5215 | } |
5216 | |
5217 | diff --git a/sound/pci/hda/dell_wmi_helper.c b/sound/pci/hda/dell_wmi_helper.c |
5218 | index 9c22f95838ef..19d41da79f93 100644 |
5219 | --- a/sound/pci/hda/dell_wmi_helper.c |
5220 | +++ b/sound/pci/hda/dell_wmi_helper.c |
5221 | @@ -49,7 +49,7 @@ static void alc_fixup_dell_wmi(struct hda_codec *codec, |
5222 | removefunc = true; |
5223 | if (dell_led_set_func(DELL_LED_MICMUTE, false) >= 0) { |
5224 | dell_led_value = 0; |
5225 | - if (spec->gen.num_adc_nids > 1) |
5226 | + if (spec->gen.num_adc_nids > 1 && !spec->gen.dyn_adc_switch) |
5227 | codec_dbg(codec, "Skipping micmute LED control due to several ADCs"); |
5228 | else { |
5229 | dell_old_cap_hook = spec->gen.cap_sync_hook; |
5230 | diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c |
5231 | index f0955fd7a2e7..6a23302297c9 100644 |
5232 | --- a/sound/pci/hda/thinkpad_helper.c |
5233 | +++ b/sound/pci/hda/thinkpad_helper.c |
5234 | @@ -62,7 +62,7 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec, |
5235 | removefunc = false; |
5236 | } |
5237 | if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) { |
5238 | - if (spec->num_adc_nids > 1) |
5239 | + if (spec->num_adc_nids > 1 && !spec->dyn_adc_switch) |
5240 | codec_dbg(codec, |
5241 | "Skipping micmute LED control due to several ADCs"); |
5242 | else { |
5243 | diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c |
5244 | index 8ff6c6a61291..c9c8dc330116 100644 |
5245 | --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c |
5246 | +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c |
5247 | @@ -89,6 +89,7 @@ struct intel_pt_decoder { |
5248 | bool pge; |
5249 | bool have_tma; |
5250 | bool have_cyc; |
5251 | + bool fixup_last_mtc; |
5252 | uint64_t pos; |
5253 | uint64_t last_ip; |
5254 | uint64_t ip; |
5255 | @@ -584,10 +585,31 @@ struct intel_pt_calc_cyc_to_tsc_info { |
5256 | uint64_t tsc_timestamp; |
5257 | uint64_t timestamp; |
5258 | bool have_tma; |
5259 | + bool fixup_last_mtc; |
5260 | bool from_mtc; |
5261 | double cbr_cyc_to_tsc; |
5262 | }; |
5263 | |
5264 | +/* |
5265 | + * MTC provides a 8-bit slice of CTC but the TMA packet only provides the lower |
5266 | + * 16 bits of CTC. If mtc_shift > 8 then some of the MTC bits are not in the CTC |
5267 | + * provided by the TMA packet. Fix-up the last_mtc calculated from the TMA |
5268 | + * packet by copying the missing bits from the current MTC assuming the least |
5269 | + * difference between the two, and that the current MTC comes after last_mtc. |
5270 | + */ |
5271 | +static void intel_pt_fixup_last_mtc(uint32_t mtc, int mtc_shift, |
5272 | + uint32_t *last_mtc) |
5273 | +{ |
5274 | + uint32_t first_missing_bit = 1U << (16 - mtc_shift); |
5275 | + uint32_t mask = ~(first_missing_bit - 1); |
5276 | + |
5277 | + *last_mtc |= mtc & mask; |
5278 | + if (*last_mtc >= mtc) { |
5279 | + *last_mtc -= first_missing_bit; |
5280 | + *last_mtc &= 0xff; |
5281 | + } |
5282 | +} |
5283 | + |
5284 | static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info) |
5285 | { |
5286 | struct intel_pt_decoder *decoder = pkt_info->decoder; |
5287 | @@ -617,6 +639,11 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info) |
5288 | return 0; |
5289 | |
5290 | mtc = pkt_info->packet.payload; |
5291 | + if (decoder->mtc_shift > 8 && data->fixup_last_mtc) { |
5292 | + data->fixup_last_mtc = false; |
5293 | + intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift, |
5294 | + &data->last_mtc); |
5295 | + } |
5296 | if (mtc > data->last_mtc) |
5297 | mtc_delta = mtc - data->last_mtc; |
5298 | else |
5299 | @@ -685,6 +712,7 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info) |
5300 | |
5301 | data->ctc_delta = 0; |
5302 | data->have_tma = true; |
5303 | + data->fixup_last_mtc = true; |
5304 | |
5305 | return 0; |
5306 | |
5307 | @@ -751,6 +779,7 @@ static void intel_pt_calc_cyc_to_tsc(struct intel_pt_decoder *decoder, |
5308 | .tsc_timestamp = decoder->tsc_timestamp, |
5309 | .timestamp = decoder->timestamp, |
5310 | .have_tma = decoder->have_tma, |
5311 | + .fixup_last_mtc = decoder->fixup_last_mtc, |
5312 | .from_mtc = from_mtc, |
5313 | .cbr_cyc_to_tsc = 0, |
5314 | }; |
5315 | @@ -1241,6 +1270,7 @@ static void intel_pt_calc_tma(struct intel_pt_decoder *decoder) |
5316 | } |
5317 | decoder->ctc_delta = 0; |
5318 | decoder->have_tma = true; |
5319 | + decoder->fixup_last_mtc = true; |
5320 | intel_pt_log("CTC timestamp " x64_fmt " last MTC %#x CTC rem %#x\n", |
5321 | decoder->ctc_timestamp, decoder->last_mtc, ctc_rem); |
5322 | } |
5323 | @@ -1255,6 +1285,12 @@ static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder) |
5324 | |
5325 | mtc = decoder->packet.payload; |
5326 | |
5327 | + if (decoder->mtc_shift > 8 && decoder->fixup_last_mtc) { |
5328 | + decoder->fixup_last_mtc = false; |
5329 | + intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift, |
5330 | + &decoder->last_mtc); |
5331 | + } |
5332 | + |
5333 | if (mtc > decoder->last_mtc) |
5334 | mtc_delta = mtc - decoder->last_mtc; |
5335 | else |
5336 | @@ -1323,6 +1359,8 @@ static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder) |
5337 | timestamp, decoder->timestamp); |
5338 | else |
5339 | decoder->timestamp = timestamp; |
5340 | + |
5341 | + decoder->timestamp_insn_cnt = 0; |
5342 | } |
5343 | |
5344 | /* Walk PSB+ packets when already in sync. */ |
5345 | diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c |
5346 | index 551ff6f640be..b2878d2b2d67 100644 |
5347 | --- a/tools/perf/util/intel-pt.c |
5348 | +++ b/tools/perf/util/intel-pt.c |
5349 | @@ -241,7 +241,7 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data) |
5350 | } |
5351 | |
5352 | queue = &ptq->pt->queues.queue_array[ptq->queue_nr]; |
5353 | - |
5354 | +next: |
5355 | buffer = auxtrace_buffer__next(queue, buffer); |
5356 | if (!buffer) { |
5357 | if (old_buffer) |
5358 | @@ -264,9 +264,6 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data) |
5359 | intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer)) |
5360 | return -ENOMEM; |
5361 | |
5362 | - if (old_buffer) |
5363 | - auxtrace_buffer__drop_data(old_buffer); |
5364 | - |
5365 | if (buffer->use_data) { |
5366 | b->len = buffer->use_size; |
5367 | b->buf = buffer->use_data; |
5368 | @@ -276,6 +273,16 @@ static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data) |
5369 | } |
5370 | b->ref_timestamp = buffer->reference; |
5371 | |
5372 | + /* |
5373 | + * If in snapshot mode and the buffer has no usable data, get next |
5374 | + * buffer and again check overlap against old_buffer. |
5375 | + */ |
5376 | + if (ptq->pt->snapshot_mode && !b->len) |
5377 | + goto next; |
5378 | + |
5379 | + if (old_buffer) |
5380 | + auxtrace_buffer__drop_data(old_buffer); |
5381 | + |
5382 | if (!old_buffer || ptq->pt->sampling_mode || (ptq->pt->snapshot_mode && |
5383 | !buffer->consecutive)) { |
5384 | b->consecutive = false; |
5385 | diff --git a/tools/spi/spidev_test.c b/tools/spi/spidev_test.c |
5386 | index 8a73d8185316..f3825b676e38 100644 |
5387 | --- a/tools/spi/spidev_test.c |
5388 | +++ b/tools/spi/spidev_test.c |
5389 | @@ -284,7 +284,7 @@ static void parse_opts(int argc, char *argv[]) |
5390 | |
5391 | static void transfer_escaped_string(int fd, char *str) |
5392 | { |
5393 | - size_t size = strlen(str + 1); |
5394 | + size_t size = strlen(str); |
5395 | uint8_t *tx; |
5396 | uint8_t *rx; |
5397 |