Contents of /trunk/kernel-alx/patches-4.9/0167-4.9.68-all-fixes.patch
Parent Directory | Revision Log
Revision 3052 -
(show annotations)
(download)
Wed Dec 20 11:50:01 2017 UTC (6 years, 9 months ago) by niro
File size: 112753 byte(s)
-linux-4.9.68
1 | diff --git a/Makefile b/Makefile |
2 | index 70546af61a0a..dfe17af517b2 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 4 |
7 | PATCHLEVEL = 9 |
8 | -SUBLEVEL = 67 |
9 | +SUBLEVEL = 68 |
10 | EXTRAVERSION = |
11 | NAME = Roaring Lionus |
12 | |
13 | diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c |
14 | index f6ba589cd312..c821c1d5610e 100644 |
15 | --- a/arch/arm/mach-omap1/dma.c |
16 | +++ b/arch/arm/mach-omap1/dma.c |
17 | @@ -32,7 +32,6 @@ |
18 | #include "soc.h" |
19 | |
20 | #define OMAP1_DMA_BASE (0xfffed800) |
21 | -#define OMAP1_LOGICAL_DMA_CH_COUNT 17 |
22 | |
23 | static u32 enable_1510_mode; |
24 | |
25 | @@ -348,8 +347,6 @@ static int __init omap1_system_dma_init(void) |
26 | goto exit_iounmap; |
27 | } |
28 | |
29 | - d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT; |
30 | - |
31 | /* Valid attributes for omap1 plus processors */ |
32 | if (cpu_is_omap15xx()) |
33 | d->dev_caps = ENABLE_1510_MODE; |
34 | @@ -366,13 +363,14 @@ static int __init omap1_system_dma_init(void) |
35 | d->dev_caps |= CLEAR_CSR_ON_READ; |
36 | d->dev_caps |= IS_WORD_16; |
37 | |
38 | - if (cpu_is_omap15xx()) |
39 | - d->chan_count = 9; |
40 | - else if (cpu_is_omap16xx() || cpu_is_omap7xx()) { |
41 | - if (!(d->dev_caps & ENABLE_1510_MODE)) |
42 | - d->chan_count = 16; |
43 | + /* available logical channels */ |
44 | + if (cpu_is_omap15xx()) { |
45 | + d->lch_count = 9; |
46 | + } else { |
47 | + if (d->dev_caps & ENABLE_1510_MODE) |
48 | + d->lch_count = 9; |
49 | else |
50 | - d->chan_count = 9; |
51 | + d->lch_count = 16; |
52 | } |
53 | |
54 | p = dma_plat_info; |
55 | diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c |
56 | index da310bb779b9..88676fe9b119 100644 |
57 | --- a/arch/arm/mach-omap2/pdata-quirks.c |
58 | +++ b/arch/arm/mach-omap2/pdata-quirks.c |
59 | @@ -147,7 +147,7 @@ static struct ti_st_plat_data wilink_pdata = { |
60 | .nshutdown_gpio = 137, |
61 | .dev_name = "/dev/ttyO1", |
62 | .flow_cntrl = 1, |
63 | - .baud_rate = 300000, |
64 | + .baud_rate = 3000000, |
65 | }; |
66 | |
67 | static struct platform_device wl18xx_device = { |
68 | diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c |
69 | index 87131cd3bc8f..6d3a50446b21 100644 |
70 | --- a/arch/m68k/mm/mcfmmu.c |
71 | +++ b/arch/m68k/mm/mcfmmu.c |
72 | @@ -169,7 +169,7 @@ void __init cf_bootmem_alloc(void) |
73 | max_pfn = max_low_pfn = PFN_DOWN(_ramend); |
74 | high_memory = (void *)_ramend; |
75 | |
76 | - m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6; |
77 | + m68k_virt_to_node_shift = fls(_ramend - 1) - 6; |
78 | module_fixup(NULL, __start_fixup, __stop_fixup); |
79 | |
80 | /* setup bootmem data */ |
81 | diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h |
82 | index f61cad3de4e6..4c935f7504f7 100644 |
83 | --- a/arch/powerpc/include/asm/book3s/64/hash.h |
84 | +++ b/arch/powerpc/include/asm/book3s/64/hash.h |
85 | @@ -201,6 +201,10 @@ extern int __meminit hash__vmemmap_create_mapping(unsigned long start, |
86 | unsigned long phys); |
87 | extern void hash__vmemmap_remove_mapping(unsigned long start, |
88 | unsigned long page_size); |
89 | + |
90 | +int hash__create_section_mapping(unsigned long start, unsigned long end); |
91 | +int hash__remove_section_mapping(unsigned long start, unsigned long end); |
92 | + |
93 | #endif /* !__ASSEMBLY__ */ |
94 | #endif /* __KERNEL__ */ |
95 | #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */ |
96 | diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c |
97 | index 78dabf065ba9..bd666287c5ed 100644 |
98 | --- a/arch/powerpc/mm/hash_utils_64.c |
99 | +++ b/arch/powerpc/mm/hash_utils_64.c |
100 | @@ -747,7 +747,7 @@ static unsigned long __init htab_get_table_size(void) |
101 | } |
102 | |
103 | #ifdef CONFIG_MEMORY_HOTPLUG |
104 | -int create_section_mapping(unsigned long start, unsigned long end) |
105 | +int hash__create_section_mapping(unsigned long start, unsigned long end) |
106 | { |
107 | int rc = htab_bolt_mapping(start, end, __pa(start), |
108 | pgprot_val(PAGE_KERNEL), mmu_linear_psize, |
109 | @@ -761,7 +761,7 @@ int create_section_mapping(unsigned long start, unsigned long end) |
110 | return rc; |
111 | } |
112 | |
113 | -int remove_section_mapping(unsigned long start, unsigned long end) |
114 | +int hash__remove_section_mapping(unsigned long start, unsigned long end) |
115 | { |
116 | int rc = htab_remove_mapping(start, end, mmu_linear_psize, |
117 | mmu_kernel_ssize); |
118 | diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c |
119 | index f4f437cbabf1..0fad7f6742ff 100644 |
120 | --- a/arch/powerpc/mm/pgtable-book3s64.c |
121 | +++ b/arch/powerpc/mm/pgtable-book3s64.c |
122 | @@ -125,3 +125,21 @@ void mmu_cleanup_all(void) |
123 | else if (mmu_hash_ops.hpte_clear_all) |
124 | mmu_hash_ops.hpte_clear_all(); |
125 | } |
126 | + |
127 | +#ifdef CONFIG_MEMORY_HOTPLUG |
128 | +int create_section_mapping(unsigned long start, unsigned long end) |
129 | +{ |
130 | + if (radix_enabled()) |
131 | + return -ENODEV; |
132 | + |
133 | + return hash__create_section_mapping(start, end); |
134 | +} |
135 | + |
136 | +int remove_section_mapping(unsigned long start, unsigned long end) |
137 | +{ |
138 | + if (radix_enabled()) |
139 | + return -ENODEV; |
140 | + |
141 | + return hash__remove_section_mapping(start, end); |
142 | +} |
143 | +#endif /* CONFIG_MEMORY_HOTPLUG */ |
144 | diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h |
145 | index 649eb62c52b3..9e02cb7955c1 100644 |
146 | --- a/arch/s390/include/asm/pci_insn.h |
147 | +++ b/arch/s390/include/asm/pci_insn.h |
148 | @@ -81,6 +81,6 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range); |
149 | int zpci_load(u64 *data, u64 req, u64 offset); |
150 | int zpci_store(u64 data, u64 req, u64 offset); |
151 | int zpci_store_block(const u64 *data, u64 req, u64 offset); |
152 | -void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc); |
153 | +int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc); |
154 | |
155 | #endif |
156 | diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h |
157 | index 402ad6df4897..c54a9310d814 100644 |
158 | --- a/arch/s390/include/asm/runtime_instr.h |
159 | +++ b/arch/s390/include/asm/runtime_instr.h |
160 | @@ -85,6 +85,8 @@ static inline void restore_ri_cb(struct runtime_instr_cb *cb_next, |
161 | load_runtime_instr_cb(&runtime_instr_empty_cb); |
162 | } |
163 | |
164 | -void exit_thread_runtime_instr(void); |
165 | +struct task_struct; |
166 | + |
167 | +void runtime_instr_release(struct task_struct *tsk); |
168 | |
169 | #endif /* _RUNTIME_INSTR_H */ |
170 | diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c |
171 | index 172fe1121d99..8382fc62cde6 100644 |
172 | --- a/arch/s390/kernel/process.c |
173 | +++ b/arch/s390/kernel/process.c |
174 | @@ -70,8 +70,6 @@ extern void kernel_thread_starter(void); |
175 | */ |
176 | void exit_thread(struct task_struct *tsk) |
177 | { |
178 | - if (tsk == current) |
179 | - exit_thread_runtime_instr(); |
180 | } |
181 | |
182 | void flush_thread(void) |
183 | @@ -84,6 +82,7 @@ void release_thread(struct task_struct *dead_task) |
184 | |
185 | void arch_release_task_struct(struct task_struct *tsk) |
186 | { |
187 | + runtime_instr_release(tsk); |
188 | } |
189 | |
190 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) |
191 | diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c |
192 | index 70cdb03d4acd..fd03a7569e10 100644 |
193 | --- a/arch/s390/kernel/runtime_instr.c |
194 | +++ b/arch/s390/kernel/runtime_instr.c |
195 | @@ -18,11 +18,24 @@ |
196 | /* empty control block to disable RI by loading it */ |
197 | struct runtime_instr_cb runtime_instr_empty_cb; |
198 | |
199 | +void runtime_instr_release(struct task_struct *tsk) |
200 | +{ |
201 | + kfree(tsk->thread.ri_cb); |
202 | +} |
203 | + |
204 | static void disable_runtime_instr(void) |
205 | { |
206 | - struct pt_regs *regs = task_pt_regs(current); |
207 | + struct task_struct *task = current; |
208 | + struct pt_regs *regs; |
209 | |
210 | + if (!task->thread.ri_cb) |
211 | + return; |
212 | + regs = task_pt_regs(task); |
213 | + preempt_disable(); |
214 | load_runtime_instr_cb(&runtime_instr_empty_cb); |
215 | + kfree(task->thread.ri_cb); |
216 | + task->thread.ri_cb = NULL; |
217 | + preempt_enable(); |
218 | |
219 | /* |
220 | * Make sure the RI bit is deleted from the PSW. If the user did not |
221 | @@ -43,19 +56,6 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb) |
222 | cb->valid = 1; |
223 | } |
224 | |
225 | -void exit_thread_runtime_instr(void) |
226 | -{ |
227 | - struct task_struct *task = current; |
228 | - |
229 | - preempt_disable(); |
230 | - if (!task->thread.ri_cb) |
231 | - return; |
232 | - disable_runtime_instr(); |
233 | - kfree(task->thread.ri_cb); |
234 | - task->thread.ri_cb = NULL; |
235 | - preempt_enable(); |
236 | -} |
237 | - |
238 | SYSCALL_DEFINE1(s390_runtime_instr, int, command) |
239 | { |
240 | struct runtime_instr_cb *cb; |
241 | @@ -64,7 +64,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command) |
242 | return -EOPNOTSUPP; |
243 | |
244 | if (command == S390_RUNTIME_INSTR_STOP) { |
245 | - exit_thread_runtime_instr(); |
246 | + disable_runtime_instr(); |
247 | return 0; |
248 | } |
249 | |
250 | diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c |
251 | index 15ffc19c8c0c..03a1d5976ff5 100644 |
252 | --- a/arch/s390/pci/pci.c |
253 | +++ b/arch/s390/pci/pci.c |
254 | @@ -354,7 +354,8 @@ static void zpci_irq_handler(struct airq_struct *airq) |
255 | /* End of second scan with interrupts on. */ |
256 | break; |
257 | /* First scan complete, reenable interrupts. */ |
258 | - zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); |
259 | + if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC)) |
260 | + break; |
261 | si = 0; |
262 | continue; |
263 | } |
264 | @@ -928,7 +929,7 @@ static int __init pci_base_init(void) |
265 | if (!s390_pci_probe) |
266 | return 0; |
267 | |
268 | - if (!test_facility(69) || !test_facility(71) || !test_facility(72)) |
269 | + if (!test_facility(69) || !test_facility(71)) |
270 | return 0; |
271 | |
272 | rc = zpci_debug_init(); |
273 | diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c |
274 | index fa8d7d4b9751..248146dcfce3 100644 |
275 | --- a/arch/s390/pci/pci_insn.c |
276 | +++ b/arch/s390/pci/pci_insn.c |
277 | @@ -7,6 +7,7 @@ |
278 | #include <linux/export.h> |
279 | #include <linux/errno.h> |
280 | #include <linux/delay.h> |
281 | +#include <asm/facility.h> |
282 | #include <asm/pci_insn.h> |
283 | #include <asm/pci_debug.h> |
284 | #include <asm/processor.h> |
285 | @@ -91,11 +92,14 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range) |
286 | } |
287 | |
288 | /* Set Interruption Controls */ |
289 | -void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc) |
290 | +int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc) |
291 | { |
292 | + if (!test_facility(72)) |
293 | + return -EIO; |
294 | asm volatile ( |
295 | " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n" |
296 | : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused)); |
297 | + return 0; |
298 | } |
299 | |
300 | /* PCI Load */ |
301 | diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c |
302 | index be202390bbd3..9dfeeeca0ea8 100644 |
303 | --- a/arch/x86/events/intel/ds.c |
304 | +++ b/arch/x86/events/intel/ds.c |
305 | @@ -1389,9 +1389,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) |
306 | continue; |
307 | |
308 | /* log dropped samples number */ |
309 | - if (error[bit]) |
310 | + if (error[bit]) { |
311 | perf_log_lost_samples(event, error[bit]); |
312 | |
313 | + if (perf_event_account_interrupt(event)) |
314 | + x86_pmu_stop(event, 0); |
315 | + } |
316 | + |
317 | if (counts[bit]) { |
318 | __intel_pmu_pebs_event(event, iregs, base, |
319 | top, bit, counts[bit]); |
320 | diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h |
321 | index 91dfcafe27a6..bad25bb80679 100644 |
322 | --- a/arch/x86/include/asm/syscalls.h |
323 | +++ b/arch/x86/include/asm/syscalls.h |
324 | @@ -21,7 +21,7 @@ asmlinkage long sys_ioperm(unsigned long, unsigned long, int); |
325 | asmlinkage long sys_iopl(unsigned int); |
326 | |
327 | /* kernel/ldt.c */ |
328 | -asmlinkage int sys_modify_ldt(int, void __user *, unsigned long); |
329 | +asmlinkage long sys_modify_ldt(int, void __user *, unsigned long); |
330 | |
331 | /* kernel/signal.c */ |
332 | asmlinkage long sys_rt_sigreturn(void); |
333 | diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c |
334 | index 095ef7ddd6ae..abfbb61b18b8 100644 |
335 | --- a/arch/x86/kernel/fpu/xstate.c |
336 | +++ b/arch/x86/kernel/fpu/xstate.c |
337 | @@ -1077,6 +1077,7 @@ int copyin_to_xsaves(const void *kbuf, const void __user *ubuf, |
338 | * Add back in the features that came in from userspace: |
339 | */ |
340 | xsave->header.xfeatures |= xfeatures; |
341 | + xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xsave->header.xfeatures; |
342 | |
343 | return 0; |
344 | } |
345 | diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c |
346 | index 5f8f0b3cc674..2c0b0b645a74 100644 |
347 | --- a/arch/x86/kernel/kprobes/ftrace.c |
348 | +++ b/arch/x86/kernel/kprobes/ftrace.c |
349 | @@ -26,7 +26,7 @@ |
350 | #include "common.h" |
351 | |
352 | static nokprobe_inline |
353 | -int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, |
354 | +void __skip_singlestep(struct kprobe *p, struct pt_regs *regs, |
355 | struct kprobe_ctlblk *kcb, unsigned long orig_ip) |
356 | { |
357 | /* |
358 | @@ -41,20 +41,21 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, |
359 | __this_cpu_write(current_kprobe, NULL); |
360 | if (orig_ip) |
361 | regs->ip = orig_ip; |
362 | - return 1; |
363 | } |
364 | |
365 | int skip_singlestep(struct kprobe *p, struct pt_regs *regs, |
366 | struct kprobe_ctlblk *kcb) |
367 | { |
368 | - if (kprobe_ftrace(p)) |
369 | - return __skip_singlestep(p, regs, kcb, 0); |
370 | - else |
371 | - return 0; |
372 | + if (kprobe_ftrace(p)) { |
373 | + __skip_singlestep(p, regs, kcb, 0); |
374 | + preempt_enable_no_resched(); |
375 | + return 1; |
376 | + } |
377 | + return 0; |
378 | } |
379 | NOKPROBE_SYMBOL(skip_singlestep); |
380 | |
381 | -/* Ftrace callback handler for kprobes */ |
382 | +/* Ftrace callback handler for kprobes -- called under preepmt disabed */ |
383 | void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, |
384 | struct ftrace_ops *ops, struct pt_regs *regs) |
385 | { |
386 | @@ -77,13 +78,17 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, |
387 | /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ |
388 | regs->ip = ip + sizeof(kprobe_opcode_t); |
389 | |
390 | + /* To emulate trap based kprobes, preempt_disable here */ |
391 | + preempt_disable(); |
392 | __this_cpu_write(current_kprobe, p); |
393 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; |
394 | - if (!p->pre_handler || !p->pre_handler(p, regs)) |
395 | + if (!p->pre_handler || !p->pre_handler(p, regs)) { |
396 | __skip_singlestep(p, regs, kcb, orig_ip); |
397 | + preempt_enable_no_resched(); |
398 | + } |
399 | /* |
400 | * If pre_handler returns !0, it sets regs->ip and |
401 | - * resets current kprobe. |
402 | + * resets current kprobe, and keep preempt count +1. |
403 | */ |
404 | } |
405 | end: |
406 | diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c |
407 | index 6707039b9032..5f70014ca602 100644 |
408 | --- a/arch/x86/kernel/ldt.c |
409 | +++ b/arch/x86/kernel/ldt.c |
410 | @@ -12,6 +12,7 @@ |
411 | #include <linux/string.h> |
412 | #include <linux/mm.h> |
413 | #include <linux/smp.h> |
414 | +#include <linux/syscalls.h> |
415 | #include <linux/slab.h> |
416 | #include <linux/vmalloc.h> |
417 | #include <linux/uaccess.h> |
418 | @@ -271,8 +272,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) |
419 | return error; |
420 | } |
421 | |
422 | -asmlinkage int sys_modify_ldt(int func, void __user *ptr, |
423 | - unsigned long bytecount) |
424 | +SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr , |
425 | + unsigned long , bytecount) |
426 | { |
427 | int ret = -ENOSYS; |
428 | |
429 | @@ -290,5 +291,14 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr, |
430 | ret = write_ldt(ptr, bytecount, 0); |
431 | break; |
432 | } |
433 | - return ret; |
434 | + /* |
435 | + * The SYSCALL_DEFINE() macros give us an 'unsigned long' |
436 | + * return type, but tht ABI for sys_modify_ldt() expects |
437 | + * 'int'. This cast gives us an int-sized value in %rax |
438 | + * for the return code. The 'unsigned' is necessary so |
439 | + * the compiler does not try to sign-extend the negative |
440 | + * return codes into the high half of the register when |
441 | + * taking the value from int->long. |
442 | + */ |
443 | + return (unsigned int)ret; |
444 | } |
445 | diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c |
446 | index 836a1eb5df43..3ee234b6234d 100644 |
447 | --- a/arch/x86/um/ldt.c |
448 | +++ b/arch/x86/um/ldt.c |
449 | @@ -6,6 +6,7 @@ |
450 | #include <linux/mm.h> |
451 | #include <linux/sched.h> |
452 | #include <linux/slab.h> |
453 | +#include <linux/syscalls.h> |
454 | #include <linux/uaccess.h> |
455 | #include <asm/unistd.h> |
456 | #include <os.h> |
457 | @@ -369,7 +370,9 @@ void free_ldt(struct mm_context *mm) |
458 | mm->arch.ldt.entry_count = 0; |
459 | } |
460 | |
461 | -int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount) |
462 | +SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr , |
463 | + unsigned long , bytecount) |
464 | { |
465 | - return do_modify_ldt_skas(func, ptr, bytecount); |
466 | + /* See non-um modify_ldt() for why we do this cast */ |
467 | + return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount); |
468 | } |
469 | diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h |
470 | index 5d4c05074a5c..e2bcacc1a921 100644 |
471 | --- a/drivers/crypto/caam/intern.h |
472 | +++ b/drivers/crypto/caam/intern.h |
473 | @@ -41,6 +41,7 @@ struct caam_drv_private_jr { |
474 | struct device *dev; |
475 | int ridx; |
476 | struct caam_job_ring __iomem *rregs; /* JobR's register space */ |
477 | + struct tasklet_struct irqtask; |
478 | int irq; /* One per queue */ |
479 | |
480 | /* Number of scatterlist crypt transforms active on the JobR */ |
481 | diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c |
482 | index 757c27f9953d..9e7f28122bb7 100644 |
483 | --- a/drivers/crypto/caam/jr.c |
484 | +++ b/drivers/crypto/caam/jr.c |
485 | @@ -73,6 +73,8 @@ static int caam_jr_shutdown(struct device *dev) |
486 | |
487 | ret = caam_reset_hw_jr(dev); |
488 | |
489 | + tasklet_kill(&jrp->irqtask); |
490 | + |
491 | /* Release interrupt */ |
492 | free_irq(jrp->irq, dev); |
493 | |
494 | @@ -128,7 +130,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) |
495 | |
496 | /* |
497 | * Check the output ring for ready responses, kick |
498 | - * the threaded irq if jobs done. |
499 | + * tasklet if jobs done. |
500 | */ |
501 | irqstate = rd_reg32(&jrp->rregs->jrintstatus); |
502 | if (!irqstate) |
503 | @@ -150,13 +152,18 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) |
504 | /* Have valid interrupt at this point, just ACK and trigger */ |
505 | wr_reg32(&jrp->rregs->jrintstatus, irqstate); |
506 | |
507 | - return IRQ_WAKE_THREAD; |
508 | + preempt_disable(); |
509 | + tasklet_schedule(&jrp->irqtask); |
510 | + preempt_enable(); |
511 | + |
512 | + return IRQ_HANDLED; |
513 | } |
514 | |
515 | -static irqreturn_t caam_jr_threadirq(int irq, void *st_dev) |
516 | +/* Deferred service handler, run as interrupt-fired tasklet */ |
517 | +static void caam_jr_dequeue(unsigned long devarg) |
518 | { |
519 | int hw_idx, sw_idx, i, head, tail; |
520 | - struct device *dev = st_dev; |
521 | + struct device *dev = (struct device *)devarg; |
522 | struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); |
523 | void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg); |
524 | u32 *userdesc, userstatus; |
525 | @@ -230,8 +237,6 @@ static irqreturn_t caam_jr_threadirq(int irq, void *st_dev) |
526 | |
527 | /* reenable / unmask IRQs */ |
528 | clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0); |
529 | - |
530 | - return IRQ_HANDLED; |
531 | } |
532 | |
533 | /** |
534 | @@ -389,10 +394,11 @@ static int caam_jr_init(struct device *dev) |
535 | |
536 | jrp = dev_get_drvdata(dev); |
537 | |
538 | + tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev); |
539 | + |
540 | /* Connect job ring interrupt handler. */ |
541 | - error = request_threaded_irq(jrp->irq, caam_jr_interrupt, |
542 | - caam_jr_threadirq, IRQF_SHARED, |
543 | - dev_name(dev), dev); |
544 | + error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED, |
545 | + dev_name(dev), dev); |
546 | if (error) { |
547 | dev_err(dev, "can't connect JobR %d interrupt (%d)\n", |
548 | jrp->ridx, jrp->irq); |
549 | @@ -454,6 +460,7 @@ static int caam_jr_init(struct device *dev) |
550 | out_free_irq: |
551 | free_irq(jrp->irq, dev); |
552 | out_kill_deq: |
553 | + tasklet_kill(&jrp->irqtask); |
554 | return error; |
555 | } |
556 | |
557 | diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c |
558 | index 4d51f9e83fa8..04bf29808200 100644 |
559 | --- a/drivers/dma-buf/fence.c |
560 | +++ b/drivers/dma-buf/fence.c |
561 | @@ -280,6 +280,31 @@ int fence_add_callback(struct fence *fence, struct fence_cb *cb, |
562 | } |
563 | EXPORT_SYMBOL(fence_add_callback); |
564 | |
565 | +/** |
566 | + * fence_get_status - returns the status upon completion |
567 | + * @fence: [in] the fence to query |
568 | + * |
569 | + * This wraps fence_get_status_locked() to return the error status |
570 | + * condition on a signaled fence. See fence_get_status_locked() for more |
571 | + * details. |
572 | + * |
573 | + * Returns 0 if the fence has not yet been signaled, 1 if the fence has |
574 | + * been signaled without an error condition, or a negative error code |
575 | + * if the fence has been completed in err. |
576 | + */ |
577 | +int fence_get_status(struct fence *fence) |
578 | +{ |
579 | + unsigned long flags; |
580 | + int status; |
581 | + |
582 | + spin_lock_irqsave(fence->lock, flags); |
583 | + status = fence_get_status_locked(fence); |
584 | + spin_unlock_irqrestore(fence->lock, flags); |
585 | + |
586 | + return status; |
587 | +} |
588 | +EXPORT_SYMBOL(fence_get_status); |
589 | + |
590 | /** |
591 | * fence_remove_callback - remove a callback from the signaling list |
592 | * @fence: [in] the fence to wait on |
593 | @@ -526,6 +551,7 @@ fence_init(struct fence *fence, const struct fence_ops *ops, |
594 | fence->context = context; |
595 | fence->seqno = seqno; |
596 | fence->flags = 0UL; |
597 | + fence->error = 0; |
598 | |
599 | trace_fence_init(fence); |
600 | } |
601 | diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c |
602 | index 62e8e6dc7953..4f3511415b29 100644 |
603 | --- a/drivers/dma-buf/sw_sync.c |
604 | +++ b/drivers/dma-buf/sw_sync.c |
605 | @@ -96,9 +96,9 @@ struct sync_timeline *sync_timeline_create(const char *name) |
606 | obj->context = fence_context_alloc(1); |
607 | strlcpy(obj->name, name, sizeof(obj->name)); |
608 | |
609 | - INIT_LIST_HEAD(&obj->child_list_head); |
610 | - INIT_LIST_HEAD(&obj->active_list_head); |
611 | - spin_lock_init(&obj->child_list_lock); |
612 | + obj->pt_tree = RB_ROOT; |
613 | + INIT_LIST_HEAD(&obj->pt_list); |
614 | + spin_lock_init(&obj->lock); |
615 | |
616 | sync_timeline_debug_add(obj); |
617 | |
618 | @@ -125,68 +125,6 @@ static void sync_timeline_put(struct sync_timeline *obj) |
619 | kref_put(&obj->kref, sync_timeline_free); |
620 | } |
621 | |
622 | -/** |
623 | - * sync_timeline_signal() - signal a status change on a sync_timeline |
624 | - * @obj: sync_timeline to signal |
625 | - * @inc: num to increment on timeline->value |
626 | - * |
627 | - * A sync implementation should call this any time one of it's fences |
628 | - * has signaled or has an error condition. |
629 | - */ |
630 | -static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) |
631 | -{ |
632 | - unsigned long flags; |
633 | - struct sync_pt *pt, *next; |
634 | - |
635 | - trace_sync_timeline(obj); |
636 | - |
637 | - spin_lock_irqsave(&obj->child_list_lock, flags); |
638 | - |
639 | - obj->value += inc; |
640 | - |
641 | - list_for_each_entry_safe(pt, next, &obj->active_list_head, |
642 | - active_list) { |
643 | - if (fence_is_signaled_locked(&pt->base)) |
644 | - list_del_init(&pt->active_list); |
645 | - } |
646 | - |
647 | - spin_unlock_irqrestore(&obj->child_list_lock, flags); |
648 | -} |
649 | - |
650 | -/** |
651 | - * sync_pt_create() - creates a sync pt |
652 | - * @parent: fence's parent sync_timeline |
653 | - * @size: size to allocate for this pt |
654 | - * @inc: value of the fence |
655 | - * |
656 | - * Creates a new sync_pt as a child of @parent. @size bytes will be |
657 | - * allocated allowing for implementation specific data to be kept after |
658 | - * the generic sync_timeline struct. Returns the sync_pt object or |
659 | - * NULL in case of error. |
660 | - */ |
661 | -static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size, |
662 | - unsigned int value) |
663 | -{ |
664 | - unsigned long flags; |
665 | - struct sync_pt *pt; |
666 | - |
667 | - if (size < sizeof(*pt)) |
668 | - return NULL; |
669 | - |
670 | - pt = kzalloc(size, GFP_KERNEL); |
671 | - if (!pt) |
672 | - return NULL; |
673 | - |
674 | - spin_lock_irqsave(&obj->child_list_lock, flags); |
675 | - sync_timeline_get(obj); |
676 | - fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock, |
677 | - obj->context, value); |
678 | - list_add_tail(&pt->child_list, &obj->child_list_head); |
679 | - INIT_LIST_HEAD(&pt->active_list); |
680 | - spin_unlock_irqrestore(&obj->child_list_lock, flags); |
681 | - return pt; |
682 | -} |
683 | - |
684 | static const char *timeline_fence_get_driver_name(struct fence *fence) |
685 | { |
686 | return "sw_sync"; |
687 | @@ -203,13 +141,17 @@ static void timeline_fence_release(struct fence *fence) |
688 | { |
689 | struct sync_pt *pt = fence_to_sync_pt(fence); |
690 | struct sync_timeline *parent = fence_parent(fence); |
691 | - unsigned long flags; |
692 | |
693 | - spin_lock_irqsave(fence->lock, flags); |
694 | - list_del(&pt->child_list); |
695 | - if (!list_empty(&pt->active_list)) |
696 | - list_del(&pt->active_list); |
697 | - spin_unlock_irqrestore(fence->lock, flags); |
698 | + if (!list_empty(&pt->link)) { |
699 | + unsigned long flags; |
700 | + |
701 | + spin_lock_irqsave(fence->lock, flags); |
702 | + if (!list_empty(&pt->link)) { |
703 | + list_del(&pt->link); |
704 | + rb_erase(&pt->node, &parent->pt_tree); |
705 | + } |
706 | + spin_unlock_irqrestore(fence->lock, flags); |
707 | + } |
708 | |
709 | sync_timeline_put(parent); |
710 | fence_free(fence); |
711 | @@ -219,18 +161,11 @@ static bool timeline_fence_signaled(struct fence *fence) |
712 | { |
713 | struct sync_timeline *parent = fence_parent(fence); |
714 | |
715 | - return (fence->seqno > parent->value) ? false : true; |
716 | + return !__fence_is_later(fence->seqno, parent->value); |
717 | } |
718 | |
719 | static bool timeline_fence_enable_signaling(struct fence *fence) |
720 | { |
721 | - struct sync_pt *pt = fence_to_sync_pt(fence); |
722 | - struct sync_timeline *parent = fence_parent(fence); |
723 | - |
724 | - if (timeline_fence_signaled(fence)) |
725 | - return false; |
726 | - |
727 | - list_add_tail(&pt->active_list, &parent->active_list_head); |
728 | return true; |
729 | } |
730 | |
731 | @@ -259,6 +194,107 @@ static const struct fence_ops timeline_fence_ops = { |
732 | .timeline_value_str = timeline_fence_timeline_value_str, |
733 | }; |
734 | |
735 | +/** |
736 | + * sync_timeline_signal() - signal a status change on a sync_timeline |
737 | + * @obj: sync_timeline to signal |
738 | + * @inc: num to increment on timeline->value |
739 | + * |
740 | + * A sync implementation should call this any time one of it's fences |
741 | + * has signaled or has an error condition. |
742 | + */ |
743 | +static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) |
744 | +{ |
745 | + struct sync_pt *pt, *next; |
746 | + |
747 | + trace_sync_timeline(obj); |
748 | + |
749 | + spin_lock_irq(&obj->lock); |
750 | + |
751 | + obj->value += inc; |
752 | + |
753 | + list_for_each_entry_safe(pt, next, &obj->pt_list, link) { |
754 | + if (!timeline_fence_signaled(&pt->base)) |
755 | + break; |
756 | + |
757 | + list_del_init(&pt->link); |
758 | + rb_erase(&pt->node, &obj->pt_tree); |
759 | + |
760 | + /* |
761 | + * A signal callback may release the last reference to this |
762 | + * fence, causing it to be freed. That operation has to be |
763 | + * last to avoid a use after free inside this loop, and must |
764 | + * be after we remove the fence from the timeline in order to |
765 | + * prevent deadlocking on timeline->lock inside |
766 | + * timeline_fence_release(). |
767 | + */ |
768 | + fence_signal_locked(&pt->base); |
769 | + } |
770 | + |
771 | + spin_unlock_irq(&obj->lock); |
772 | +} |
773 | + |
774 | +/** |
775 | + * sync_pt_create() - creates a sync pt |
776 | + * @parent: fence's parent sync_timeline |
777 | + * @inc: value of the fence |
778 | + * |
779 | + * Creates a new sync_pt as a child of @parent. @size bytes will be |
780 | + * allocated allowing for implementation specific data to be kept after |
781 | + * the generic sync_timeline struct. Returns the sync_pt object or |
782 | + * NULL in case of error. |
783 | + */ |
784 | +static struct sync_pt *sync_pt_create(struct sync_timeline *obj, |
785 | + unsigned int value) |
786 | +{ |
787 | + struct sync_pt *pt; |
788 | + |
789 | + pt = kzalloc(sizeof(*pt), GFP_KERNEL); |
790 | + if (!pt) |
791 | + return NULL; |
792 | + |
793 | + sync_timeline_get(obj); |
794 | + fence_init(&pt->base, &timeline_fence_ops, &obj->lock, |
795 | + obj->context, value); |
796 | + INIT_LIST_HEAD(&pt->link); |
797 | + |
798 | + spin_lock_irq(&obj->lock); |
799 | + if (!fence_is_signaled_locked(&pt->base)) { |
800 | + struct rb_node **p = &obj->pt_tree.rb_node; |
801 | + struct rb_node *parent = NULL; |
802 | + |
803 | + while (*p) { |
804 | + struct sync_pt *other; |
805 | + int cmp; |
806 | + |
807 | + parent = *p; |
808 | + other = rb_entry(parent, typeof(*pt), node); |
809 | + cmp = value - other->base.seqno; |
810 | + if (cmp > 0) { |
811 | + p = &parent->rb_right; |
812 | + } else if (cmp < 0) { |
813 | + p = &parent->rb_left; |
814 | + } else { |
815 | + if (fence_get_rcu(&other->base)) { |
816 | + fence_put(&pt->base); |
817 | + pt = other; |
818 | + goto unlock; |
819 | + } |
820 | + p = &parent->rb_left; |
821 | + } |
822 | + } |
823 | + rb_link_node(&pt->node, parent, p); |
824 | + rb_insert_color(&pt->node, &obj->pt_tree); |
825 | + |
826 | + parent = rb_next(&pt->node); |
827 | + list_add_tail(&pt->link, |
828 | + parent ? &rb_entry(parent, typeof(*pt), node)->link : &obj->pt_list); |
829 | + } |
830 | +unlock: |
831 | + spin_unlock_irq(&obj->lock); |
832 | + |
833 | + return pt; |
834 | +} |
835 | + |
836 | /* |
837 | * *WARNING* |
838 | * |
839 | @@ -285,8 +321,16 @@ static int sw_sync_debugfs_open(struct inode *inode, struct file *file) |
840 | static int sw_sync_debugfs_release(struct inode *inode, struct file *file) |
841 | { |
842 | struct sync_timeline *obj = file->private_data; |
843 | + struct sync_pt *pt, *next; |
844 | + |
845 | + spin_lock_irq(&obj->lock); |
846 | + |
847 | + list_for_each_entry_safe(pt, next, &obj->pt_list, link) { |
848 | + fence_set_error(&pt->base, -ENOENT); |
849 | + fence_signal_locked(&pt->base); |
850 | + } |
851 | |
852 | - smp_wmb(); |
853 | + spin_unlock_irq(&obj->lock); |
854 | |
855 | sync_timeline_put(obj); |
856 | return 0; |
857 | @@ -309,7 +353,7 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj, |
858 | goto err; |
859 | } |
860 | |
861 | - pt = sync_pt_create(obj, sizeof(*pt), data.value); |
862 | + pt = sync_pt_create(obj, data.value); |
863 | if (!pt) { |
864 | err = -ENOMEM; |
865 | goto err; |
866 | @@ -345,6 +389,11 @@ static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg) |
867 | if (copy_from_user(&value, (void __user *)arg, sizeof(value))) |
868 | return -EFAULT; |
869 | |
870 | + while (value > INT_MAX) { |
871 | + sync_timeline_signal(obj, INT_MAX); |
872 | + value -= INT_MAX; |
873 | + } |
874 | + |
875 | sync_timeline_signal(obj, value); |
876 | |
877 | return 0; |
878 | diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c |
879 | index 2dd4c3db6caa..858263dbecd4 100644 |
880 | --- a/drivers/dma-buf/sync_debug.c |
881 | +++ b/drivers/dma-buf/sync_debug.c |
882 | @@ -62,29 +62,29 @@ void sync_file_debug_remove(struct sync_file *sync_file) |
883 | |
884 | static const char *sync_status_str(int status) |
885 | { |
886 | - if (status == 0) |
887 | - return "signaled"; |
888 | + if (status < 0) |
889 | + return "error"; |
890 | |
891 | if (status > 0) |
892 | - return "active"; |
893 | + return "signaled"; |
894 | |
895 | - return "error"; |
896 | + return "active"; |
897 | } |
898 | |
899 | -static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show) |
900 | +static void sync_print_fence(struct seq_file *s, |
901 | + struct fence *fence, bool show) |
902 | { |
903 | - int status = 1; |
904 | struct sync_timeline *parent = fence_parent(fence); |
905 | + int status; |
906 | |
907 | - if (fence_is_signaled_locked(fence)) |
908 | - status = fence->status; |
909 | + status = fence_get_status_locked(fence); |
910 | |
911 | seq_printf(s, " %s%sfence %s", |
912 | show ? parent->name : "", |
913 | show ? "_" : "", |
914 | sync_status_str(status)); |
915 | |
916 | - if (status <= 0) { |
917 | + if (status) { |
918 | struct timespec64 ts64 = |
919 | ktime_to_timespec64(fence->timestamp); |
920 | |
921 | @@ -116,17 +116,15 @@ static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show) |
922 | static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) |
923 | { |
924 | struct list_head *pos; |
925 | - unsigned long flags; |
926 | |
927 | seq_printf(s, "%s: %d\n", obj->name, obj->value); |
928 | |
929 | - spin_lock_irqsave(&obj->child_list_lock, flags); |
930 | - list_for_each(pos, &obj->child_list_head) { |
931 | - struct sync_pt *pt = |
932 | - container_of(pos, struct sync_pt, child_list); |
933 | + spin_lock_irq(&obj->lock); |
934 | + list_for_each(pos, &obj->pt_list) { |
935 | + struct sync_pt *pt = container_of(pos, struct sync_pt, link); |
936 | sync_print_fence(s, &pt->base, false); |
937 | } |
938 | - spin_unlock_irqrestore(&obj->child_list_lock, flags); |
939 | + spin_unlock_irq(&obj->lock); |
940 | } |
941 | |
942 | static void sync_print_sync_file(struct seq_file *s, |
943 | @@ -135,7 +133,7 @@ static void sync_print_sync_file(struct seq_file *s, |
944 | int i; |
945 | |
946 | seq_printf(s, "[%p] %s: %s\n", sync_file, sync_file->name, |
947 | - sync_status_str(!fence_is_signaled(sync_file->fence))); |
948 | + sync_status_str(fence_get_status(sync_file->fence))); |
949 | |
950 | if (fence_is_array(sync_file->fence)) { |
951 | struct fence_array *array = to_fence_array(sync_file->fence); |
952 | @@ -149,12 +147,11 @@ static void sync_print_sync_file(struct seq_file *s, |
953 | |
954 | static int sync_debugfs_show(struct seq_file *s, void *unused) |
955 | { |
956 | - unsigned long flags; |
957 | struct list_head *pos; |
958 | |
959 | seq_puts(s, "objs:\n--------------\n"); |
960 | |
961 | - spin_lock_irqsave(&sync_timeline_list_lock, flags); |
962 | + spin_lock_irq(&sync_timeline_list_lock); |
963 | list_for_each(pos, &sync_timeline_list_head) { |
964 | struct sync_timeline *obj = |
965 | container_of(pos, struct sync_timeline, |
966 | @@ -163,11 +160,11 @@ static int sync_debugfs_show(struct seq_file *s, void *unused) |
967 | sync_print_obj(s, obj); |
968 | seq_puts(s, "\n"); |
969 | } |
970 | - spin_unlock_irqrestore(&sync_timeline_list_lock, flags); |
971 | + spin_unlock_irq(&sync_timeline_list_lock); |
972 | |
973 | seq_puts(s, "fences:\n--------------\n"); |
974 | |
975 | - spin_lock_irqsave(&sync_file_list_lock, flags); |
976 | + spin_lock_irq(&sync_file_list_lock); |
977 | list_for_each(pos, &sync_file_list_head) { |
978 | struct sync_file *sync_file = |
979 | container_of(pos, struct sync_file, sync_file_list); |
980 | @@ -175,7 +172,7 @@ static int sync_debugfs_show(struct seq_file *s, void *unused) |
981 | sync_print_sync_file(s, sync_file); |
982 | seq_puts(s, "\n"); |
983 | } |
984 | - spin_unlock_irqrestore(&sync_file_list_lock, flags); |
985 | + spin_unlock_irq(&sync_file_list_lock); |
986 | return 0; |
987 | } |
988 | |
989 | diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h |
990 | index d269aa6783aa..9615dc0385b5 100644 |
991 | --- a/drivers/dma-buf/sync_debug.h |
992 | +++ b/drivers/dma-buf/sync_debug.h |
993 | @@ -14,6 +14,7 @@ |
994 | #define _LINUX_SYNC_H |
995 | |
996 | #include <linux/list.h> |
997 | +#include <linux/rbtree.h> |
998 | #include <linux/spinlock.h> |
999 | #include <linux/fence.h> |
1000 | |
1001 | @@ -24,43 +25,41 @@ |
1002 | * struct sync_timeline - sync object |
1003 | * @kref: reference count on fence. |
1004 | * @name: name of the sync_timeline. Useful for debugging |
1005 | - * @child_list_head: list of children sync_pts for this sync_timeline |
1006 | - * @child_list_lock: lock protecting @child_list_head and fence.status |
1007 | - * @active_list_head: list of active (unsignaled/errored) sync_pts |
1008 | + * @lock: lock protecting @pt_list and @value |
1009 | + * @pt_tree: rbtree of active (unsignaled/errored) sync_pts |
1010 | + * @pt_list: list of active (unsignaled/errored) sync_pts |
1011 | * @sync_timeline_list: membership in global sync_timeline_list |
1012 | */ |
1013 | struct sync_timeline { |
1014 | struct kref kref; |
1015 | char name[32]; |
1016 | |
1017 | - /* protected by child_list_lock */ |
1018 | + /* protected by lock */ |
1019 | u64 context; |
1020 | int value; |
1021 | |
1022 | - struct list_head child_list_head; |
1023 | - spinlock_t child_list_lock; |
1024 | - |
1025 | - struct list_head active_list_head; |
1026 | + struct rb_root pt_tree; |
1027 | + struct list_head pt_list; |
1028 | + spinlock_t lock; |
1029 | |
1030 | struct list_head sync_timeline_list; |
1031 | }; |
1032 | |
1033 | static inline struct sync_timeline *fence_parent(struct fence *fence) |
1034 | { |
1035 | - return container_of(fence->lock, struct sync_timeline, |
1036 | - child_list_lock); |
1037 | + return container_of(fence->lock, struct sync_timeline, lock); |
1038 | } |
1039 | |
1040 | /** |
1041 | * struct sync_pt - sync_pt object |
1042 | * @base: base fence object |
1043 | - * @child_list: sync timeline child's list |
1044 | - * @active_list: sync timeline active child's list |
1045 | + * @link: link on the sync timeline's list |
1046 | + * @node: node in the sync timeline's tree |
1047 | */ |
1048 | struct sync_pt { |
1049 | struct fence base; |
1050 | - struct list_head child_list; |
1051 | - struct list_head active_list; |
1052 | + struct list_head link; |
1053 | + struct rb_node node; |
1054 | }; |
1055 | |
1056 | #ifdef CONFIG_SW_SYNC |
1057 | diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c |
1058 | index b29a9e817320..f0c374d6ab40 100644 |
1059 | --- a/drivers/dma-buf/sync_file.c |
1060 | +++ b/drivers/dma-buf/sync_file.c |
1061 | @@ -67,9 +67,10 @@ static void fence_check_cb_func(struct fence *f, struct fence_cb *cb) |
1062 | * sync_file_create() - creates a sync file |
1063 | * @fence: fence to add to the sync_fence |
1064 | * |
1065 | - * Creates a sync_file containg @fence. Once this is called, the sync_file |
1066 | - * takes ownership of @fence. The sync_file can be released with |
1067 | - * fput(sync_file->file). Returns the sync_file or NULL in case of error. |
1068 | + * Creates a sync_file containg @fence. This function acquires and additional |
1069 | + * reference of @fence for the newly-created &sync_file, if it succeeds. The |
1070 | + * sync_file can be released with fput(sync_file->file). Returns the |
1071 | + * sync_file or NULL in case of error. |
1072 | */ |
1073 | struct sync_file *sync_file_create(struct fence *fence) |
1074 | { |
1075 | @@ -79,7 +80,7 @@ struct sync_file *sync_file_create(struct fence *fence) |
1076 | if (!sync_file) |
1077 | return NULL; |
1078 | |
1079 | - sync_file->fence = fence; |
1080 | + sync_file->fence = fence_get(fence); |
1081 | |
1082 | snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d", |
1083 | fence->ops->get_driver_name(fence), |
1084 | @@ -90,13 +91,6 @@ struct sync_file *sync_file_create(struct fence *fence) |
1085 | } |
1086 | EXPORT_SYMBOL(sync_file_create); |
1087 | |
1088 | -/** |
1089 | - * sync_file_fdget() - get a sync_file from an fd |
1090 | - * @fd: fd referencing a fence |
1091 | - * |
1092 | - * Ensures @fd references a valid sync_file, increments the refcount of the |
1093 | - * backing file. Returns the sync_file or NULL in case of error. |
1094 | - */ |
1095 | static struct sync_file *sync_file_fdget(int fd) |
1096 | { |
1097 | struct file *file = fget(fd); |
1098 | @@ -377,10 +371,8 @@ static void sync_fill_fence_info(struct fence *fence, |
1099 | sizeof(info->obj_name)); |
1100 | strlcpy(info->driver_name, fence->ops->get_driver_name(fence), |
1101 | sizeof(info->driver_name)); |
1102 | - if (fence_is_signaled(fence)) |
1103 | - info->status = fence->status >= 0 ? 1 : fence->status; |
1104 | - else |
1105 | - info->status = 0; |
1106 | + |
1107 | + info->status = fence_get_status(fence); |
1108 | info->timestamp_ns = ktime_to_ns(fence->timestamp); |
1109 | } |
1110 | |
1111 | diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c |
1112 | index 9f3dbc8c63d2..fb2e7476d96b 100644 |
1113 | --- a/drivers/dma/pl330.c |
1114 | +++ b/drivers/dma/pl330.c |
1115 | @@ -1694,7 +1694,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i) |
1116 | static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) |
1117 | { |
1118 | struct pl330_thread *thrd = NULL; |
1119 | - unsigned long flags; |
1120 | int chans, i; |
1121 | |
1122 | if (pl330->state == DYING) |
1123 | @@ -1702,8 +1701,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) |
1124 | |
1125 | chans = pl330->pcfg.num_chan; |
1126 | |
1127 | - spin_lock_irqsave(&pl330->lock, flags); |
1128 | - |
1129 | for (i = 0; i < chans; i++) { |
1130 | thrd = &pl330->channels[i]; |
1131 | if ((thrd->free) && (!_manager_ns(thrd) || |
1132 | @@ -1721,8 +1718,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) |
1133 | thrd = NULL; |
1134 | } |
1135 | |
1136 | - spin_unlock_irqrestore(&pl330->lock, flags); |
1137 | - |
1138 | return thrd; |
1139 | } |
1140 | |
1141 | @@ -1740,7 +1735,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev) |
1142 | static void pl330_release_channel(struct pl330_thread *thrd) |
1143 | { |
1144 | struct pl330_dmac *pl330; |
1145 | - unsigned long flags; |
1146 | |
1147 | if (!thrd || thrd->free) |
1148 | return; |
1149 | @@ -1752,10 +1746,8 @@ static void pl330_release_channel(struct pl330_thread *thrd) |
1150 | |
1151 | pl330 = thrd->dmac; |
1152 | |
1153 | - spin_lock_irqsave(&pl330->lock, flags); |
1154 | _free_event(thrd, thrd->ev); |
1155 | thrd->free = true; |
1156 | - spin_unlock_irqrestore(&pl330->lock, flags); |
1157 | } |
1158 | |
1159 | /* Initialize the structure for PL330 configuration, that can be used |
1160 | @@ -2120,20 +2112,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) |
1161 | struct pl330_dmac *pl330 = pch->dmac; |
1162 | unsigned long flags; |
1163 | |
1164 | - spin_lock_irqsave(&pch->lock, flags); |
1165 | + spin_lock_irqsave(&pl330->lock, flags); |
1166 | |
1167 | dma_cookie_init(chan); |
1168 | pch->cyclic = false; |
1169 | |
1170 | pch->thread = pl330_request_channel(pl330); |
1171 | if (!pch->thread) { |
1172 | - spin_unlock_irqrestore(&pch->lock, flags); |
1173 | + spin_unlock_irqrestore(&pl330->lock, flags); |
1174 | return -ENOMEM; |
1175 | } |
1176 | |
1177 | tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); |
1178 | |
1179 | - spin_unlock_irqrestore(&pch->lock, flags); |
1180 | + spin_unlock_irqrestore(&pl330->lock, flags); |
1181 | |
1182 | return 1; |
1183 | } |
1184 | @@ -2236,12 +2228,13 @@ static int pl330_pause(struct dma_chan *chan) |
1185 | static void pl330_free_chan_resources(struct dma_chan *chan) |
1186 | { |
1187 | struct dma_pl330_chan *pch = to_pchan(chan); |
1188 | + struct pl330_dmac *pl330 = pch->dmac; |
1189 | unsigned long flags; |
1190 | |
1191 | tasklet_kill(&pch->task); |
1192 | |
1193 | pm_runtime_get_sync(pch->dmac->ddma.dev); |
1194 | - spin_lock_irqsave(&pch->lock, flags); |
1195 | + spin_lock_irqsave(&pl330->lock, flags); |
1196 | |
1197 | pl330_release_channel(pch->thread); |
1198 | pch->thread = NULL; |
1199 | @@ -2249,7 +2242,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan) |
1200 | if (pch->cyclic) |
1201 | list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); |
1202 | |
1203 | - spin_unlock_irqrestore(&pch->lock, flags); |
1204 | + spin_unlock_irqrestore(&pl330->lock, flags); |
1205 | pm_runtime_mark_last_busy(pch->dmac->ddma.dev); |
1206 | pm_runtime_put_autosuspend(pch->dmac->ddma.dev); |
1207 | } |
1208 | diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c |
1209 | index 307547f4848d..ae3f60be7759 100644 |
1210 | --- a/drivers/dma/stm32-dma.c |
1211 | +++ b/drivers/dma/stm32-dma.c |
1212 | @@ -884,7 +884,7 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c, |
1213 | struct virt_dma_desc *vdesc; |
1214 | enum dma_status status; |
1215 | unsigned long flags; |
1216 | - u32 residue; |
1217 | + u32 residue = 0; |
1218 | |
1219 | status = dma_cookie_status(c, cookie, state); |
1220 | if ((status == DMA_COMPLETE) || (!state)) |
1221 | @@ -892,16 +892,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c, |
1222 | |
1223 | spin_lock_irqsave(&chan->vchan.lock, flags); |
1224 | vdesc = vchan_find_desc(&chan->vchan, cookie); |
1225 | - if (cookie == chan->desc->vdesc.tx.cookie) { |
1226 | + if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) |
1227 | residue = stm32_dma_desc_residue(chan, chan->desc, |
1228 | chan->next_sg); |
1229 | - } else if (vdesc) { |
1230 | + else if (vdesc) |
1231 | residue = stm32_dma_desc_residue(chan, |
1232 | to_stm32_dma_desc(vdesc), 0); |
1233 | - } else { |
1234 | - residue = 0; |
1235 | - } |
1236 | - |
1237 | dma_set_residue(state, residue); |
1238 | |
1239 | spin_unlock_irqrestore(&chan->vchan.lock, flags); |
1240 | @@ -976,21 +972,18 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec, |
1241 | struct stm32_dma_chan *chan; |
1242 | struct dma_chan *c; |
1243 | |
1244 | - if (dma_spec->args_count < 3) |
1245 | + if (dma_spec->args_count < 4) |
1246 | return NULL; |
1247 | |
1248 | cfg.channel_id = dma_spec->args[0]; |
1249 | cfg.request_line = dma_spec->args[1]; |
1250 | cfg.stream_config = dma_spec->args[2]; |
1251 | - cfg.threshold = 0; |
1252 | + cfg.threshold = dma_spec->args[3]; |
1253 | |
1254 | if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >= |
1255 | STM32_DMA_MAX_REQUEST_ID)) |
1256 | return NULL; |
1257 | |
1258 | - if (dma_spec->args_count > 3) |
1259 | - cfg.threshold = dma_spec->args[3]; |
1260 | - |
1261 | chan = &dmadev->chan[cfg.channel_id]; |
1262 | |
1263 | c = dma_get_slave_channel(&chan->vchan.chan); |
1264 | diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c |
1265 | index 54775221a01f..3c47e6361d81 100644 |
1266 | --- a/drivers/edac/sb_edac.c |
1267 | +++ b/drivers/edac/sb_edac.c |
1268 | @@ -2510,6 +2510,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci, |
1269 | break; |
1270 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA: |
1271 | pvt->pci_ta = pdev; |
1272 | + break; |
1273 | case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS: |
1274 | pvt->pci_ras = pdev; |
1275 | break; |
1276 | diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c |
1277 | index c2bd9f045532..6d75fd0e3105 100644 |
1278 | --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c |
1279 | +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c |
1280 | @@ -565,11 +565,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = |
1281 | |
1282 | static void dce_virtual_encoder_destroy(struct drm_encoder *encoder) |
1283 | { |
1284 | - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); |
1285 | - |
1286 | - kfree(amdgpu_encoder->enc_priv); |
1287 | drm_encoder_cleanup(encoder); |
1288 | - kfree(amdgpu_encoder); |
1289 | + kfree(encoder); |
1290 | } |
1291 | |
1292 | static const struct drm_encoder_funcs dce_virtual_encoder_funcs = { |
1293 | diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c |
1294 | index 50f0cf2788b7..7522f796f19b 100644 |
1295 | --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c |
1296 | +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c |
1297 | @@ -182,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, |
1298 | WREG32(mmVCE_UENC_CLOCK_GATING_2, data); |
1299 | |
1300 | data = RREG32(mmVCE_UENC_REG_CLOCK_GATING); |
1301 | - data &= ~0xffc00000; |
1302 | + data &= ~0x3ff; |
1303 | WREG32(mmVCE_UENC_REG_CLOCK_GATING, data); |
1304 | |
1305 | data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL); |
1306 | diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c |
1307 | index 6ca1f3117fe8..6dd09c306bc1 100644 |
1308 | --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c |
1309 | +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c |
1310 | @@ -46,7 +46,8 @@ enum decon_flag_bits { |
1311 | BIT_CLKS_ENABLED, |
1312 | BIT_IRQS_ENABLED, |
1313 | BIT_WIN_UPDATED, |
1314 | - BIT_SUSPENDED |
1315 | + BIT_SUSPENDED, |
1316 | + BIT_REQUEST_UPDATE |
1317 | }; |
1318 | |
1319 | struct decon_context { |
1320 | @@ -315,6 +316,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, |
1321 | |
1322 | /* window enable */ |
1323 | decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0); |
1324 | + set_bit(BIT_REQUEST_UPDATE, &ctx->flags); |
1325 | } |
1326 | |
1327 | static void decon_disable_plane(struct exynos_drm_crtc *crtc, |
1328 | @@ -327,6 +329,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc, |
1329 | return; |
1330 | |
1331 | decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0); |
1332 | + set_bit(BIT_REQUEST_UPDATE, &ctx->flags); |
1333 | } |
1334 | |
1335 | static void decon_atomic_flush(struct exynos_drm_crtc *crtc) |
1336 | @@ -340,8 +343,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc) |
1337 | for (i = ctx->first_win; i < WINDOWS_NR; i++) |
1338 | decon_shadow_protect_win(ctx, i, false); |
1339 | |
1340 | - /* standalone update */ |
1341 | - decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); |
1342 | + if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags)) |
1343 | + decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); |
1344 | |
1345 | if (ctx->out_type & IFTYPE_I80) |
1346 | set_bit(BIT_WIN_UPDATED, &ctx->flags); |
1347 | diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c |
1348 | index cc2fde2ae5ef..c9eef0f51d31 100644 |
1349 | --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c |
1350 | +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c |
1351 | @@ -243,7 +243,6 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev) |
1352 | return PTR_ERR(fsl_dev->state); |
1353 | } |
1354 | |
1355 | - clk_disable_unprepare(fsl_dev->pix_clk); |
1356 | clk_disable_unprepare(fsl_dev->clk); |
1357 | |
1358 | return 0; |
1359 | @@ -266,6 +265,7 @@ static int fsl_dcu_drm_pm_resume(struct device *dev) |
1360 | if (fsl_dev->tcon) |
1361 | fsl_tcon_bypass_enable(fsl_dev->tcon); |
1362 | fsl_dcu_drm_init_planes(fsl_dev->drm); |
1363 | + enable_irq(fsl_dev->irq); |
1364 | drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state); |
1365 | |
1366 | console_lock(); |
1367 | @@ -273,7 +273,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev) |
1368 | console_unlock(); |
1369 | |
1370 | drm_kms_helper_poll_enable(fsl_dev->drm); |
1371 | - enable_irq(fsl_dev->irq); |
1372 | |
1373 | return 0; |
1374 | } |
1375 | diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c |
1376 | index 686971263bef..45d6771fac8c 100644 |
1377 | --- a/drivers/i2c/busses/i2c-cadence.c |
1378 | +++ b/drivers/i2c/busses/i2c-cadence.c |
1379 | @@ -962,10 +962,6 @@ static int cdns_i2c_probe(struct platform_device *pdev) |
1380 | goto err_clk_dis; |
1381 | } |
1382 | |
1383 | - ret = i2c_add_adapter(&id->adap); |
1384 | - if (ret < 0) |
1385 | - goto err_clk_dis; |
1386 | - |
1387 | /* |
1388 | * Cadence I2C controller has a bug wherein it generates |
1389 | * invalid read transaction after HW timeout in master receiver mode. |
1390 | @@ -975,6 +971,10 @@ static int cdns_i2c_probe(struct platform_device *pdev) |
1391 | */ |
1392 | cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET); |
1393 | |
1394 | + ret = i2c_add_adapter(&id->adap); |
1395 | + if (ret < 0) |
1396 | + goto err_clk_dis; |
1397 | + |
1398 | dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n", |
1399 | id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq); |
1400 | |
1401 | diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c |
1402 | index 472641fc890c..af05e20c986b 100644 |
1403 | --- a/drivers/iio/adc/ti-ads1015.c |
1404 | +++ b/drivers/iio/adc/ti-ads1015.c |
1405 | @@ -269,6 +269,7 @@ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val) |
1406 | |
1407 | conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr_old]); |
1408 | conv_time += DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]); |
1409 | + conv_time += conv_time / 10; /* 10% internal clock inaccuracy */ |
1410 | usleep_range(conv_time, conv_time + 1); |
1411 | data->conv_invalid = false; |
1412 | } |
1413 | diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c |
1414 | index 63890ebb72bd..eccf7039aaca 100644 |
1415 | --- a/drivers/infiniband/hw/qedr/qedr_cm.c |
1416 | +++ b/drivers/infiniband/hw/qedr/qedr_cm.c |
1417 | @@ -404,9 +404,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev, |
1418 | } |
1419 | |
1420 | if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h)) |
1421 | - packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW; |
1422 | - else |
1423 | packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB; |
1424 | + else |
1425 | + packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW; |
1426 | |
1427 | packet->roce_mode = roce_mode; |
1428 | memcpy(packet->header.vaddr, ud_header_buffer, header_size); |
1429 | diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c |
1430 | index 4ba019e3dc56..35d5b89decb4 100644 |
1431 | --- a/drivers/infiniband/hw/qedr/verbs.c |
1432 | +++ b/drivers/infiniband/hw/qedr/verbs.c |
1433 | @@ -1653,7 +1653,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev, |
1434 | int status = 0; |
1435 | |
1436 | if (new_state == qp->state) |
1437 | - return 1; |
1438 | + return 0; |
1439 | |
1440 | switch (qp->state) { |
1441 | case QED_ROCE_QP_STATE_RESET: |
1442 | diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c |
1443 | index e0f1c6d534fe..ab8a1b36af21 100644 |
1444 | --- a/drivers/md/bcache/request.c |
1445 | +++ b/drivers/md/bcache/request.c |
1446 | @@ -703,7 +703,14 @@ static void cached_dev_read_error(struct closure *cl) |
1447 | struct search *s = container_of(cl, struct search, cl); |
1448 | struct bio *bio = &s->bio.bio; |
1449 | |
1450 | - if (s->recoverable) { |
1451 | + /* |
1452 | + * If read request hit dirty data (s->read_dirty_data is true), |
1453 | + * then recovery a failed read request from cached device may |
1454 | + * get a stale data back. So read failure recovery is only |
1455 | + * permitted when read request hit clean data in cache device, |
1456 | + * or when cache read race happened. |
1457 | + */ |
1458 | + if (s->recoverable && !s->read_dirty_data) { |
1459 | /* Retry from the backing device: */ |
1460 | trace_bcache_read_retry(s->orig_bio); |
1461 | |
1462 | diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c |
1463 | index 90ed2e12d345..80c89a31d790 100644 |
1464 | --- a/drivers/mmc/host/sdhci-msm.c |
1465 | +++ b/drivers/mmc/host/sdhci-msm.c |
1466 | @@ -642,6 +642,21 @@ static int sdhci_msm_probe(struct platform_device *pdev) |
1467 | CORE_VENDOR_SPEC_CAPABILITIES0); |
1468 | } |
1469 | |
1470 | + /* |
1471 | + * Power on reset state may trigger power irq if previous status of |
1472 | + * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq |
1473 | + * interrupt in GIC, any pending power irq interrupt should be |
1474 | + * acknowledged. Otherwise power irq interrupt handler would be |
1475 | + * fired prematurely. |
1476 | + */ |
1477 | + sdhci_msm_voltage_switch(host); |
1478 | + |
1479 | + /* |
1480 | + * Ensure that above writes are propogated before interrupt enablement |
1481 | + * in GIC. |
1482 | + */ |
1483 | + mb(); |
1484 | + |
1485 | /* Setup IRQ for handling power/voltage tasks with PMIC */ |
1486 | msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq"); |
1487 | if (msm_host->pwr_irq < 0) { |
1488 | @@ -651,6 +666,9 @@ static int sdhci_msm_probe(struct platform_device *pdev) |
1489 | goto clk_disable; |
1490 | } |
1491 | |
1492 | + /* Enable pwr irq interrupts */ |
1493 | + writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK); |
1494 | + |
1495 | ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL, |
1496 | sdhci_msm_pwr_irq, IRQF_ONESHOT, |
1497 | dev_name(&pdev->dev), host); |
1498 | diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c |
1499 | index e90c6a7333d7..2e4649655181 100644 |
1500 | --- a/drivers/net/appletalk/ipddp.c |
1501 | +++ b/drivers/net/appletalk/ipddp.c |
1502 | @@ -191,7 +191,7 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev) |
1503 | */ |
1504 | static int ipddp_create(struct ipddp_route *new_rt) |
1505 | { |
1506 | - struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL); |
1507 | + struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL); |
1508 | |
1509 | if (rt == NULL) |
1510 | return -ENOMEM; |
1511 | diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c |
1512 | index be7ec5a76a54..744ed6ddaf37 100644 |
1513 | --- a/drivers/net/ethernet/broadcom/bcmsysport.c |
1514 | +++ b/drivers/net/ethernet/broadcom/bcmsysport.c |
1515 | @@ -1023,15 +1023,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, |
1516 | goto out; |
1517 | } |
1518 | |
1519 | - /* Insert TSB and checksum infos */ |
1520 | - if (priv->tsb_en) { |
1521 | - skb = bcm_sysport_insert_tsb(skb, dev); |
1522 | - if (!skb) { |
1523 | - ret = NETDEV_TX_OK; |
1524 | - goto out; |
1525 | - } |
1526 | - } |
1527 | - |
1528 | /* The Ethernet switch we are interfaced with needs packets to be at |
1529 | * least 64 bytes (including FCS) otherwise they will be discarded when |
1530 | * they enter the switch port logic. When Broadcom tags are enabled, we |
1531 | @@ -1039,13 +1030,21 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, |
1532 | * (including FCS and tag) because the length verification is done after |
1533 | * the Broadcom tag is stripped off the ingress packet. |
1534 | */ |
1535 | - if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) { |
1536 | + if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) { |
1537 | ret = NETDEV_TX_OK; |
1538 | goto out; |
1539 | } |
1540 | |
1541 | - skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ? |
1542 | - ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len; |
1543 | + /* Insert TSB and checksum infos */ |
1544 | + if (priv->tsb_en) { |
1545 | + skb = bcm_sysport_insert_tsb(skb, dev); |
1546 | + if (!skb) { |
1547 | + ret = NETDEV_TX_OK; |
1548 | + goto out; |
1549 | + } |
1550 | + } |
1551 | + |
1552 | + skb_len = skb->len; |
1553 | |
1554 | mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); |
1555 | if (dma_mapping_error(kdev, mapping)) { |
1556 | diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c |
1557 | index 67befedef709..578c7f8f11bf 100644 |
1558 | --- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c |
1559 | +++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c |
1560 | @@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed) |
1561 | int speed = 2; |
1562 | |
1563 | if (!xcv) { |
1564 | - dev_err(&xcv->pdev->dev, |
1565 | - "XCV init not done, probe may have failed\n"); |
1566 | + pr_err("XCV init not done, probe may have failed\n"); |
1567 | return; |
1568 | } |
1569 | |
1570 | diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c |
1571 | index 0f0de5b63622..d04a6c163445 100644 |
1572 | --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c |
1573 | +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c |
1574 | @@ -133,17 +133,15 @@ cxgb_find_route6(struct cxgb4_lld_info *lldi, |
1575 | if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) |
1576 | fl6.flowi6_oif = sin6_scope_id; |
1577 | dst = ip6_route_output(&init_net, NULL, &fl6); |
1578 | - if (!dst) |
1579 | - goto out; |
1580 | - if (!cxgb_our_interface(lldi, get_real_dev, |
1581 | - ip6_dst_idev(dst)->dev) && |
1582 | - !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) { |
1583 | + if (dst->error || |
1584 | + (!cxgb_our_interface(lldi, get_real_dev, |
1585 | + ip6_dst_idev(dst)->dev) && |
1586 | + !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK))) { |
1587 | dst_release(dst); |
1588 | - dst = NULL; |
1589 | + return NULL; |
1590 | } |
1591 | } |
1592 | |
1593 | -out: |
1594 | return dst; |
1595 | } |
1596 | EXPORT_SYMBOL(cxgb_find_route6); |
1597 | diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c |
1598 | index 5626908f3f7a..1644896568c4 100644 |
1599 | --- a/drivers/net/ethernet/emulex/benet/be_main.c |
1600 | +++ b/drivers/net/ethernet/emulex/benet/be_main.c |
1601 | @@ -275,8 +275,7 @@ static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac) |
1602 | |
1603 | /* Check if mac has already been added as part of uc-list */ |
1604 | for (i = 0; i < adapter->uc_macs; i++) { |
1605 | - if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN], |
1606 | - mac)) { |
1607 | + if (ether_addr_equal(adapter->uc_list[i].mac, mac)) { |
1608 | /* mac already added, skip addition */ |
1609 | adapter->pmac_id[0] = adapter->pmac_id[i + 1]; |
1610 | return 0; |
1611 | @@ -363,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) |
1612 | status = -EPERM; |
1613 | goto err; |
1614 | } |
1615 | -done: |
1616 | + |
1617 | + /* Remember currently programmed MAC */ |
1618 | ether_addr_copy(adapter->dev_mac, addr->sa_data); |
1619 | +done: |
1620 | ether_addr_copy(netdev->dev_addr, addr->sa_data); |
1621 | dev_info(dev, "MAC address changed to %pM\n", addr->sa_data); |
1622 | return 0; |
1623 | @@ -1679,14 +1680,12 @@ static void be_clear_mc_list(struct be_adapter *adapter) |
1624 | |
1625 | static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx) |
1626 | { |
1627 | - if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN], |
1628 | - adapter->dev_mac)) { |
1629 | + if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) { |
1630 | adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0]; |
1631 | return 0; |
1632 | } |
1633 | |
1634 | - return be_cmd_pmac_add(adapter, |
1635 | - (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN], |
1636 | + return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac, |
1637 | adapter->if_handle, |
1638 | &adapter->pmac_id[uc_idx + 1], 0); |
1639 | } |
1640 | @@ -1722,9 +1721,8 @@ static void be_set_uc_list(struct be_adapter *adapter) |
1641 | } |
1642 | |
1643 | if (adapter->update_uc_list) { |
1644 | - i = 1; /* First slot is claimed by the Primary MAC */ |
1645 | - |
1646 | /* cache the uc-list in adapter array */ |
1647 | + i = 0; |
1648 | netdev_for_each_uc_addr(ha, netdev) { |
1649 | ether_addr_copy(adapter->uc_list[i].mac, ha->addr); |
1650 | i++; |
1651 | @@ -3639,8 +3637,10 @@ static void be_disable_if_filters(struct be_adapter *adapter) |
1652 | { |
1653 | /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */ |
1654 | if (!BEx_chip(adapter) || !be_virtfn(adapter) || |
1655 | - check_privilege(adapter, BE_PRIV_FILTMGMT)) |
1656 | + check_privilege(adapter, BE_PRIV_FILTMGMT)) { |
1657 | be_dev_mac_del(adapter, adapter->pmac_id[0]); |
1658 | + eth_zero_addr(adapter->dev_mac); |
1659 | + } |
1660 | |
1661 | be_clear_uc_list(adapter); |
1662 | be_clear_mc_list(adapter); |
1663 | @@ -3794,12 +3794,27 @@ static int be_enable_if_filters(struct be_adapter *adapter) |
1664 | if (status) |
1665 | return status; |
1666 | |
1667 | - /* Don't add MAC on BE3 VFs without FILTMGMT privilege */ |
1668 | - if (!BEx_chip(adapter) || !be_virtfn(adapter) || |
1669 | - check_privilege(adapter, BE_PRIV_FILTMGMT)) { |
1670 | + /* Normally this condition usually true as the ->dev_mac is zeroed. |
1671 | + * But on BE3 VFs the initial MAC is pre-programmed by PF and |
1672 | + * subsequent be_dev_mac_add() can fail (after fresh boot) |
1673 | + */ |
1674 | + if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) { |
1675 | + int old_pmac_id = -1; |
1676 | + |
1677 | + /* Remember old programmed MAC if any - can happen on BE3 VF */ |
1678 | + if (!is_zero_ether_addr(adapter->dev_mac)) |
1679 | + old_pmac_id = adapter->pmac_id[0]; |
1680 | + |
1681 | status = be_dev_mac_add(adapter, adapter->netdev->dev_addr); |
1682 | if (status) |
1683 | return status; |
1684 | + |
1685 | + /* Delete the old programmed MAC as we successfully programmed |
1686 | + * a new MAC |
1687 | + */ |
1688 | + if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0]) |
1689 | + be_dev_mac_del(adapter, old_pmac_id); |
1690 | + |
1691 | ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr); |
1692 | } |
1693 | |
1694 | @@ -4573,6 +4588,10 @@ static int be_mac_setup(struct be_adapter *adapter) |
1695 | |
1696 | memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); |
1697 | memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); |
1698 | + |
1699 | + /* Initial MAC for BE3 VFs is already programmed by PF */ |
1700 | + if (BEx_chip(adapter) && be_virtfn(adapter)) |
1701 | + memcpy(adapter->dev_mac, mac, ETH_ALEN); |
1702 | } |
1703 | |
1704 | return 0; |
1705 | diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c |
1706 | index 12aef1b15356..849b8712ec81 100644 |
1707 | --- a/drivers/net/ethernet/freescale/fec_main.c |
1708 | +++ b/drivers/net/ethernet/freescale/fec_main.c |
1709 | @@ -2923,6 +2923,7 @@ static void set_multicast_list(struct net_device *ndev) |
1710 | struct netdev_hw_addr *ha; |
1711 | unsigned int i, bit, data, crc, tmp; |
1712 | unsigned char hash; |
1713 | + unsigned int hash_high = 0, hash_low = 0; |
1714 | |
1715 | if (ndev->flags & IFF_PROMISC) { |
1716 | tmp = readl(fep->hwp + FEC_R_CNTRL); |
1717 | @@ -2945,11 +2946,7 @@ static void set_multicast_list(struct net_device *ndev) |
1718 | return; |
1719 | } |
1720 | |
1721 | - /* Clear filter and add the addresses in hash register |
1722 | - */ |
1723 | - writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); |
1724 | - writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); |
1725 | - |
1726 | + /* Add the addresses in hash register */ |
1727 | netdev_for_each_mc_addr(ha, ndev) { |
1728 | /* calculate crc32 value of mac address */ |
1729 | crc = 0xffffffff; |
1730 | @@ -2967,16 +2964,14 @@ static void set_multicast_list(struct net_device *ndev) |
1731 | */ |
1732 | hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f; |
1733 | |
1734 | - if (hash > 31) { |
1735 | - tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); |
1736 | - tmp |= 1 << (hash - 32); |
1737 | - writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); |
1738 | - } else { |
1739 | - tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW); |
1740 | - tmp |= 1 << hash; |
1741 | - writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW); |
1742 | - } |
1743 | + if (hash > 31) |
1744 | + hash_high |= 1 << (hash - 32); |
1745 | + else |
1746 | + hash_low |= 1 << hash; |
1747 | } |
1748 | + |
1749 | + writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); |
1750 | + writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW); |
1751 | } |
1752 | |
1753 | /* Set a MAC change in hardware. */ |
1754 | diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c |
1755 | index d4d97ca12e83..f9897d17f01d 100644 |
1756 | --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c |
1757 | +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c |
1758 | @@ -251,13 +251,9 @@ static u32 freq_to_shift(u16 freq) |
1759 | { |
1760 | u32 freq_khz = freq * 1000; |
1761 | u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC; |
1762 | - u64 tmp_rounded = |
1763 | - roundup_pow_of_two(max_val_cycles) > max_val_cycles ? |
1764 | - roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX; |
1765 | - u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ? |
1766 | - max_val_cycles : tmp_rounded; |
1767 | + u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1); |
1768 | /* calculate max possible multiplier in order to fit in 64bit */ |
1769 | - u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded); |
1770 | + u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded); |
1771 | |
1772 | /* This comes from the reverse of clocksource_khz2mult */ |
1773 | return ilog2(div_u64(max_mul * freq_khz, 1000000)); |
1774 | diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c |
1775 | index 11623aad0e8e..10d3a9f6349e 100644 |
1776 | --- a/drivers/net/ethernet/renesas/ravb_main.c |
1777 | +++ b/drivers/net/ethernet/renesas/ravb_main.c |
1778 | @@ -941,14 +941,10 @@ static int ravb_poll(struct napi_struct *napi, int budget) |
1779 | /* Receive error message handling */ |
1780 | priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; |
1781 | priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; |
1782 | - if (priv->rx_over_errors != ndev->stats.rx_over_errors) { |
1783 | + if (priv->rx_over_errors != ndev->stats.rx_over_errors) |
1784 | ndev->stats.rx_over_errors = priv->rx_over_errors; |
1785 | - netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n"); |
1786 | - } |
1787 | - if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) { |
1788 | + if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) |
1789 | ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; |
1790 | - netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n"); |
1791 | - } |
1792 | out: |
1793 | return budget - quota; |
1794 | } |
1795 | diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c |
1796 | index cebde074d196..cb206e5526c4 100644 |
1797 | --- a/drivers/net/gtp.c |
1798 | +++ b/drivers/net/gtp.c |
1799 | @@ -69,7 +69,6 @@ struct gtp_dev { |
1800 | struct socket *sock0; |
1801 | struct socket *sock1u; |
1802 | |
1803 | - struct net *net; |
1804 | struct net_device *dev; |
1805 | |
1806 | unsigned int hash_size; |
1807 | @@ -316,7 +315,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb) |
1808 | |
1809 | netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk); |
1810 | |
1811 | - xnet = !net_eq(gtp->net, dev_net(gtp->dev)); |
1812 | + xnet = !net_eq(sock_net(sk), dev_net(gtp->dev)); |
1813 | |
1814 | switch (udp_sk(sk)->encap_type) { |
1815 | case UDP_ENCAP_GTP0: |
1816 | @@ -612,7 +611,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev) |
1817 | pktinfo.fl4.saddr, pktinfo.fl4.daddr, |
1818 | pktinfo.iph->tos, |
1819 | ip4_dst_hoplimit(&pktinfo.rt->dst), |
1820 | - htons(IP_DF), |
1821 | + 0, |
1822 | pktinfo.gtph_port, pktinfo.gtph_port, |
1823 | true, false); |
1824 | break; |
1825 | @@ -658,7 +657,7 @@ static void gtp_link_setup(struct net_device *dev) |
1826 | static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize); |
1827 | static void gtp_hashtable_free(struct gtp_dev *gtp); |
1828 | static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, |
1829 | - int fd_gtp0, int fd_gtp1, struct net *src_net); |
1830 | + int fd_gtp0, int fd_gtp1); |
1831 | |
1832 | static int gtp_newlink(struct net *src_net, struct net_device *dev, |
1833 | struct nlattr *tb[], struct nlattr *data[]) |
1834 | @@ -675,7 +674,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, |
1835 | fd0 = nla_get_u32(data[IFLA_GTP_FD0]); |
1836 | fd1 = nla_get_u32(data[IFLA_GTP_FD1]); |
1837 | |
1838 | - err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net); |
1839 | + err = gtp_encap_enable(dev, gtp, fd0, fd1); |
1840 | if (err < 0) |
1841 | goto out_err; |
1842 | |
1843 | @@ -821,7 +820,7 @@ static void gtp_hashtable_free(struct gtp_dev *gtp) |
1844 | } |
1845 | |
1846 | static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, |
1847 | - int fd_gtp0, int fd_gtp1, struct net *src_net) |
1848 | + int fd_gtp0, int fd_gtp1) |
1849 | { |
1850 | struct udp_tunnel_sock_cfg tuncfg = {NULL}; |
1851 | struct socket *sock0, *sock1u; |
1852 | @@ -858,7 +857,6 @@ static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp, |
1853 | |
1854 | gtp->sock0 = sock0; |
1855 | gtp->sock1u = sock1u; |
1856 | - gtp->net = src_net; |
1857 | |
1858 | tuncfg.sk_user_data = gtp; |
1859 | tuncfg.encap_rcv = gtp_encap_recv; |
1860 | diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c |
1861 | index 222918828655..fbf5945ce00d 100644 |
1862 | --- a/drivers/net/phy/micrel.c |
1863 | +++ b/drivers/net/phy/micrel.c |
1864 | @@ -1020,7 +1020,7 @@ static struct phy_driver ksphy_driver[] = { |
1865 | .phy_id = PHY_ID_KSZ8795, |
1866 | .phy_id_mask = MICREL_PHY_ID_MASK, |
1867 | .name = "Micrel KSZ8795", |
1868 | - .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause), |
1869 | + .features = PHY_BASIC_FEATURES, |
1870 | .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, |
1871 | .config_init = kszphy_config_init, |
1872 | .config_aneg = ksz8873mll_config_aneg, |
1873 | diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h |
1874 | index cb7365bdf6e0..5b1d2e8402d9 100644 |
1875 | --- a/drivers/net/xen-netback/common.h |
1876 | +++ b/drivers/net/xen-netback/common.h |
1877 | @@ -113,10 +113,10 @@ struct xenvif_stats { |
1878 | * A subset of struct net_device_stats that contains only the |
1879 | * fields that are updated in netback.c for each queue. |
1880 | */ |
1881 | - unsigned int rx_bytes; |
1882 | - unsigned int rx_packets; |
1883 | - unsigned int tx_bytes; |
1884 | - unsigned int tx_packets; |
1885 | + u64 rx_bytes; |
1886 | + u64 rx_packets; |
1887 | + u64 tx_bytes; |
1888 | + u64 tx_packets; |
1889 | |
1890 | /* Additional stats used by xenvif */ |
1891 | unsigned long rx_gso_checksum_fixup; |
1892 | diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c |
1893 | index 5bfaf5578810..618013e7f87b 100644 |
1894 | --- a/drivers/net/xen-netback/interface.c |
1895 | +++ b/drivers/net/xen-netback/interface.c |
1896 | @@ -225,10 +225,10 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev) |
1897 | { |
1898 | struct xenvif *vif = netdev_priv(dev); |
1899 | struct xenvif_queue *queue = NULL; |
1900 | - unsigned long rx_bytes = 0; |
1901 | - unsigned long rx_packets = 0; |
1902 | - unsigned long tx_bytes = 0; |
1903 | - unsigned long tx_packets = 0; |
1904 | + u64 rx_bytes = 0; |
1905 | + u64 rx_packets = 0; |
1906 | + u64 tx_bytes = 0; |
1907 | + u64 tx_packets = 0; |
1908 | unsigned int index; |
1909 | |
1910 | spin_lock(&vif->lock); |
1911 | diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c |
1912 | index cd442e46afb4..8d498a997e25 100644 |
1913 | --- a/drivers/net/xen-netfront.c |
1914 | +++ b/drivers/net/xen-netfront.c |
1915 | @@ -1854,27 +1854,19 @@ static int talk_to_netback(struct xenbus_device *dev, |
1916 | xennet_destroy_queues(info); |
1917 | |
1918 | err = xennet_create_queues(info, &num_queues); |
1919 | - if (err < 0) |
1920 | - goto destroy_ring; |
1921 | + if (err < 0) { |
1922 | + xenbus_dev_fatal(dev, err, "creating queues"); |
1923 | + kfree(info->queues); |
1924 | + info->queues = NULL; |
1925 | + goto out; |
1926 | + } |
1927 | |
1928 | /* Create shared ring, alloc event channel -- for each queue */ |
1929 | for (i = 0; i < num_queues; ++i) { |
1930 | queue = &info->queues[i]; |
1931 | err = setup_netfront(dev, queue, feature_split_evtchn); |
1932 | - if (err) { |
1933 | - /* setup_netfront() will tidy up the current |
1934 | - * queue on error, but we need to clean up |
1935 | - * those already allocated. |
1936 | - */ |
1937 | - if (i > 0) { |
1938 | - rtnl_lock(); |
1939 | - netif_set_real_num_tx_queues(info->netdev, i); |
1940 | - rtnl_unlock(); |
1941 | - goto destroy_ring; |
1942 | - } else { |
1943 | - goto out; |
1944 | - } |
1945 | - } |
1946 | + if (err) |
1947 | + goto destroy_ring; |
1948 | } |
1949 | |
1950 | again: |
1951 | @@ -1964,9 +1956,9 @@ static int talk_to_netback(struct xenbus_device *dev, |
1952 | xenbus_transaction_end(xbt, 1); |
1953 | destroy_ring: |
1954 | xennet_disconnect_backend(info); |
1955 | - kfree(info->queues); |
1956 | - info->queues = NULL; |
1957 | + xennet_destroy_queues(info); |
1958 | out: |
1959 | + device_unregister(&dev->dev); |
1960 | return err; |
1961 | } |
1962 | |
1963 | diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c |
1964 | index 55ce769cecee..fbd6d487103f 100644 |
1965 | --- a/drivers/nvme/target/core.c |
1966 | +++ b/drivers/nvme/target/core.c |
1967 | @@ -816,6 +816,9 @@ static void nvmet_ctrl_free(struct kref *ref) |
1968 | list_del(&ctrl->subsys_entry); |
1969 | mutex_unlock(&subsys->lock); |
1970 | |
1971 | + flush_work(&ctrl->async_event_work); |
1972 | + cancel_work_sync(&ctrl->fatal_err_work); |
1973 | + |
1974 | ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid); |
1975 | nvmet_subsys_put(subsys); |
1976 | |
1977 | diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c |
1978 | index 91f5f55a8a9b..59059ffbb98c 100644 |
1979 | --- a/drivers/scsi/qla2xxx/qla_target.c |
1980 | +++ b/drivers/scsi/qla2xxx/qla_target.c |
1981 | @@ -668,11 +668,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) |
1982 | { |
1983 | struct qla_hw_data *ha = vha->hw; |
1984 | struct qla_tgt_sess *sess = NULL; |
1985 | - uint32_t unpacked_lun, lun = 0; |
1986 | uint16_t loop_id; |
1987 | int res = 0; |
1988 | struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; |
1989 | - struct atio_from_isp *a = (struct atio_from_isp *)iocb; |
1990 | unsigned long flags; |
1991 | |
1992 | loop_id = le16_to_cpu(n->u.isp24.nport_handle); |
1993 | @@ -725,11 +723,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) |
1994 | "loop_id %d)\n", vha->host_no, sess, sess->port_name, |
1995 | mcmd, loop_id); |
1996 | |
1997 | - lun = a->u.isp24.fcp_cmnd.lun; |
1998 | - unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); |
1999 | - |
2000 | - return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd, |
2001 | - iocb, QLA24XX_MGMT_SEND_NACK); |
2002 | + return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK); |
2003 | } |
2004 | |
2005 | /* ha->tgt.sess_lock supposed to be held on entry */ |
2006 | diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c |
2007 | index c1eafbd7610a..da51fed143cd 100644 |
2008 | --- a/drivers/spi/spi-axi-spi-engine.c |
2009 | +++ b/drivers/spi/spi-axi-spi-engine.c |
2010 | @@ -553,7 +553,7 @@ static int spi_engine_probe(struct platform_device *pdev) |
2011 | |
2012 | static int spi_engine_remove(struct platform_device *pdev) |
2013 | { |
2014 | - struct spi_master *master = platform_get_drvdata(pdev); |
2015 | + struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); |
2016 | struct spi_engine *spi_engine = spi_master_get_devdata(master); |
2017 | int irq = platform_get_irq(pdev, 0); |
2018 | |
2019 | @@ -561,6 +561,8 @@ static int spi_engine_remove(struct platform_device *pdev) |
2020 | |
2021 | free_irq(irq, master); |
2022 | |
2023 | + spi_master_put(master); |
2024 | + |
2025 | writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING); |
2026 | writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE); |
2027 | writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET); |
2028 | diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c |
2029 | index 1de3a772eb7d..cbf02ebb30a2 100644 |
2030 | --- a/drivers/spi/spi-sh-msiof.c |
2031 | +++ b/drivers/spi/spi-sh-msiof.c |
2032 | @@ -862,7 +862,7 @@ static int sh_msiof_transfer_one(struct spi_master *master, |
2033 | break; |
2034 | copy32 = copy_bswap32; |
2035 | } else if (bits <= 16) { |
2036 | - if (l & 1) |
2037 | + if (l & 3) |
2038 | break; |
2039 | copy32 = copy_wswap32; |
2040 | } else { |
2041 | diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c |
2042 | index 29dc249b0c74..3c2c233c2e49 100644 |
2043 | --- a/drivers/staging/greybus/loopback.c |
2044 | +++ b/drivers/staging/greybus/loopback.c |
2045 | @@ -1034,8 +1034,10 @@ static int gb_loopback_fn(void *data) |
2046 | error = gb_loopback_async_sink(gb, size); |
2047 | } |
2048 | |
2049 | - if (error) |
2050 | + if (error) { |
2051 | gb->error++; |
2052 | + gb->iteration_count++; |
2053 | + } |
2054 | } else { |
2055 | /* We are effectively single threaded here */ |
2056 | if (type == GB_LOOPBACK_TYPE_PING) |
2057 | diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c |
2058 | index 436691814a5e..27333d973bcd 100644 |
2059 | --- a/drivers/staging/lustre/lustre/llite/llite_mmap.c |
2060 | +++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c |
2061 | @@ -401,15 +401,13 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) |
2062 | result = VM_FAULT_LOCKED; |
2063 | break; |
2064 | case -ENODATA: |
2065 | + case -EAGAIN: |
2066 | case -EFAULT: |
2067 | result = VM_FAULT_NOPAGE; |
2068 | break; |
2069 | case -ENOMEM: |
2070 | result = VM_FAULT_OOM; |
2071 | break; |
2072 | - case -EAGAIN: |
2073 | - result = VM_FAULT_RETRY; |
2074 | - break; |
2075 | default: |
2076 | result = VM_FAULT_SIGBUS; |
2077 | break; |
2078 | diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c |
2079 | index 499d7bfe7147..75e6d5e0504f 100644 |
2080 | --- a/drivers/staging/media/cec/cec-adap.c |
2081 | +++ b/drivers/staging/media/cec/cec-adap.c |
2082 | @@ -608,8 +608,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, |
2083 | } |
2084 | memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len); |
2085 | if (msg->len == 1) { |
2086 | - if (cec_msg_initiator(msg) != 0xf || |
2087 | - cec_msg_destination(msg) == 0xf) { |
2088 | + if (cec_msg_destination(msg) == 0xf) { |
2089 | dprintk(1, "cec_transmit_msg: invalid poll message\n"); |
2090 | return -EINVAL; |
2091 | } |
2092 | @@ -634,7 +633,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, |
2093 | dprintk(1, "cec_transmit_msg: destination is the adapter itself\n"); |
2094 | return -EINVAL; |
2095 | } |
2096 | - if (cec_msg_initiator(msg) != 0xf && |
2097 | + if (msg->len > 1 && adap->is_configured && |
2098 | !cec_has_log_addr(adap, cec_msg_initiator(msg))) { |
2099 | dprintk(1, "cec_transmit_msg: initiator has unknown logical address %d\n", |
2100 | cec_msg_initiator(msg)); |
2101 | @@ -883,7 +882,7 @@ static int cec_config_log_addr(struct cec_adapter *adap, |
2102 | |
2103 | /* Send poll message */ |
2104 | msg.len = 1; |
2105 | - msg.msg[0] = 0xf0 | log_addr; |
2106 | + msg.msg[0] = (log_addr << 4) | log_addr; |
2107 | err = cec_transmit_msg_fh(adap, &msg, NULL, true); |
2108 | |
2109 | /* |
2110 | diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c |
2111 | index ee2dcd05010f..0b60d1e0333e 100644 |
2112 | --- a/drivers/staging/rtl8188eu/core/rtw_mlme.c |
2113 | +++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c |
2114 | @@ -107,10 +107,10 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv) |
2115 | |
2116 | void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv) |
2117 | { |
2118 | - rtw_free_mlme_priv_ie_data(pmlmepriv); |
2119 | - |
2120 | - if (pmlmepriv) |
2121 | + if (pmlmepriv) { |
2122 | + rtw_free_mlme_priv_ie_data(pmlmepriv); |
2123 | vfree(pmlmepriv->free_bss_buf); |
2124 | + } |
2125 | } |
2126 | |
2127 | struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv) |
2128 | diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c |
2129 | index f8c31070a337..2ffebb7e5ff8 100644 |
2130 | --- a/drivers/tty/serial/8250/8250_fintek.c |
2131 | +++ b/drivers/tty/serial/8250/8250_fintek.c |
2132 | @@ -121,7 +121,7 @@ static int fintek_8250_rs485_config(struct uart_port *port, |
2133 | |
2134 | if ((!!(rs485->flags & SER_RS485_RTS_ON_SEND)) == |
2135 | (!!(rs485->flags & SER_RS485_RTS_AFTER_SEND))) |
2136 | - rs485->flags &= SER_RS485_ENABLED; |
2137 | + rs485->flags &= ~SER_RS485_ENABLED; |
2138 | else |
2139 | config |= RS485_URA; |
2140 | |
2141 | diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c |
2142 | index 22d32d295c5b..b80ea872b039 100644 |
2143 | --- a/drivers/tty/serial/8250/8250_pci.c |
2144 | +++ b/drivers/tty/serial/8250/8250_pci.c |
2145 | @@ -5568,6 +5568,9 @@ static struct pci_device_id serial_pci_tbl[] = { |
2146 | { PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 }, |
2147 | { PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 }, |
2148 | |
2149 | + /* Amazon PCI serial device */ |
2150 | + { PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 }, |
2151 | + |
2152 | /* |
2153 | * These entries match devices with class COMMUNICATION_SERIAL, |
2154 | * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL |
2155 | diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c |
2156 | index 1ef31e3ee4a1..f6e4373a8850 100644 |
2157 | --- a/drivers/tty/serial/8250/8250_port.c |
2158 | +++ b/drivers/tty/serial/8250/8250_port.c |
2159 | @@ -2526,8 +2526,11 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud, |
2160 | serial_dl_write(up, quot); |
2161 | |
2162 | /* XR17V35x UARTs have an extra fractional divisor register (DLD) */ |
2163 | - if (up->port.type == PORT_XR17V35X) |
2164 | + if (up->port.type == PORT_XR17V35X) { |
2165 | + /* Preserve bits not related to baudrate; DLD[7:4]. */ |
2166 | + quot_frac |= serial_port_in(port, 0x2) & 0xf0; |
2167 | serial_port_out(port, 0x2, quot_frac); |
2168 | + } |
2169 | } |
2170 | |
2171 | static unsigned int serial8250_get_baud_rate(struct uart_port *port, |
2172 | diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c |
2173 | index 701c085bb19b..53cbf4ebef10 100644 |
2174 | --- a/drivers/tty/sysrq.c |
2175 | +++ b/drivers/tty/sysrq.c |
2176 | @@ -243,8 +243,10 @@ static void sysrq_handle_showallcpus(int key) |
2177 | * architecture has no support for it: |
2178 | */ |
2179 | if (!trigger_all_cpu_backtrace()) { |
2180 | - struct pt_regs *regs = get_irq_regs(); |
2181 | + struct pt_regs *regs = NULL; |
2182 | |
2183 | + if (in_irq()) |
2184 | + regs = get_irq_regs(); |
2185 | if (regs) { |
2186 | pr_info("CPU%d:\n", smp_processor_id()); |
2187 | show_regs(regs); |
2188 | @@ -263,7 +265,10 @@ static struct sysrq_key_op sysrq_showallcpus_op = { |
2189 | |
2190 | static void sysrq_handle_showregs(int key) |
2191 | { |
2192 | - struct pt_regs *regs = get_irq_regs(); |
2193 | + struct pt_regs *regs = NULL; |
2194 | + |
2195 | + if (in_irq()) |
2196 | + regs = get_irq_regs(); |
2197 | if (regs) |
2198 | show_regs(regs); |
2199 | perf_event_print_debug(); |
2200 | diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c |
2201 | index 5008f71fb08d..5ebe04d3598b 100644 |
2202 | --- a/drivers/usb/core/config.c |
2203 | +++ b/drivers/usb/core/config.c |
2204 | @@ -900,14 +900,25 @@ void usb_release_bos_descriptor(struct usb_device *dev) |
2205 | } |
2206 | } |
2207 | |
2208 | +static const __u8 bos_desc_len[256] = { |
2209 | + [USB_CAP_TYPE_WIRELESS_USB] = USB_DT_USB_WIRELESS_CAP_SIZE, |
2210 | + [USB_CAP_TYPE_EXT] = USB_DT_USB_EXT_CAP_SIZE, |
2211 | + [USB_SS_CAP_TYPE] = USB_DT_USB_SS_CAP_SIZE, |
2212 | + [USB_SSP_CAP_TYPE] = USB_DT_USB_SSP_CAP_SIZE(1), |
2213 | + [CONTAINER_ID_TYPE] = USB_DT_USB_SS_CONTN_ID_SIZE, |
2214 | + [USB_PTM_CAP_TYPE] = USB_DT_USB_PTM_ID_SIZE, |
2215 | +}; |
2216 | + |
2217 | /* Get BOS descriptor set */ |
2218 | int usb_get_bos_descriptor(struct usb_device *dev) |
2219 | { |
2220 | struct device *ddev = &dev->dev; |
2221 | struct usb_bos_descriptor *bos; |
2222 | struct usb_dev_cap_header *cap; |
2223 | + struct usb_ssp_cap_descriptor *ssp_cap; |
2224 | unsigned char *buffer; |
2225 | - int length, total_len, num, i; |
2226 | + int length, total_len, num, i, ssac; |
2227 | + __u8 cap_type; |
2228 | int ret; |
2229 | |
2230 | bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL); |
2231 | @@ -960,7 +971,13 @@ int usb_get_bos_descriptor(struct usb_device *dev) |
2232 | dev->bos->desc->bNumDeviceCaps = i; |
2233 | break; |
2234 | } |
2235 | + cap_type = cap->bDevCapabilityType; |
2236 | length = cap->bLength; |
2237 | + if (bos_desc_len[cap_type] && length < bos_desc_len[cap_type]) { |
2238 | + dev->bos->desc->bNumDeviceCaps = i; |
2239 | + break; |
2240 | + } |
2241 | + |
2242 | total_len -= length; |
2243 | |
2244 | if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { |
2245 | @@ -968,7 +985,7 @@ int usb_get_bos_descriptor(struct usb_device *dev) |
2246 | continue; |
2247 | } |
2248 | |
2249 | - switch (cap->bDevCapabilityType) { |
2250 | + switch (cap_type) { |
2251 | case USB_CAP_TYPE_WIRELESS_USB: |
2252 | /* Wireless USB cap descriptor is handled by wusb */ |
2253 | break; |
2254 | @@ -981,8 +998,11 @@ int usb_get_bos_descriptor(struct usb_device *dev) |
2255 | (struct usb_ss_cap_descriptor *)buffer; |
2256 | break; |
2257 | case USB_SSP_CAP_TYPE: |
2258 | - dev->bos->ssp_cap = |
2259 | - (struct usb_ssp_cap_descriptor *)buffer; |
2260 | + ssp_cap = (struct usb_ssp_cap_descriptor *)buffer; |
2261 | + ssac = (le32_to_cpu(ssp_cap->bmAttributes) & |
2262 | + USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1; |
2263 | + if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac)) |
2264 | + dev->bos->ssp_cap = ssp_cap; |
2265 | break; |
2266 | case CONTAINER_ID_TYPE: |
2267 | dev->bos->ss_id = |
2268 | diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c |
2269 | index fa619354c5c5..893ebae51029 100644 |
2270 | --- a/drivers/usb/core/devio.c |
2271 | +++ b/drivers/usb/core/devio.c |
2272 | @@ -134,42 +134,38 @@ enum snoop_when { |
2273 | #define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0) |
2274 | |
2275 | /* Limit on the total amount of memory we can allocate for transfers */ |
2276 | -static unsigned usbfs_memory_mb = 16; |
2277 | +static u32 usbfs_memory_mb = 16; |
2278 | module_param(usbfs_memory_mb, uint, 0644); |
2279 | MODULE_PARM_DESC(usbfs_memory_mb, |
2280 | "maximum MB allowed for usbfs buffers (0 = no limit)"); |
2281 | |
2282 | /* Hard limit, necessary to avoid arithmetic overflow */ |
2283 | -#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000) |
2284 | +#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000) |
2285 | |
2286 | -static atomic_t usbfs_memory_usage; /* Total memory currently allocated */ |
2287 | +static atomic64_t usbfs_memory_usage; /* Total memory currently allocated */ |
2288 | |
2289 | /* Check whether it's okay to allocate more memory for a transfer */ |
2290 | -static int usbfs_increase_memory_usage(unsigned amount) |
2291 | +static int usbfs_increase_memory_usage(u64 amount) |
2292 | { |
2293 | - unsigned lim; |
2294 | + u64 lim; |
2295 | |
2296 | - /* |
2297 | - * Convert usbfs_memory_mb to bytes, avoiding overflows. |
2298 | - * 0 means use the hard limit (effectively unlimited). |
2299 | - */ |
2300 | lim = ACCESS_ONCE(usbfs_memory_mb); |
2301 | - if (lim == 0 || lim > (USBFS_XFER_MAX >> 20)) |
2302 | - lim = USBFS_XFER_MAX; |
2303 | - else |
2304 | - lim <<= 20; |
2305 | + lim <<= 20; |
2306 | |
2307 | - atomic_add(amount, &usbfs_memory_usage); |
2308 | - if (atomic_read(&usbfs_memory_usage) <= lim) |
2309 | - return 0; |
2310 | - atomic_sub(amount, &usbfs_memory_usage); |
2311 | - return -ENOMEM; |
2312 | + atomic64_add(amount, &usbfs_memory_usage); |
2313 | + |
2314 | + if (lim > 0 && atomic64_read(&usbfs_memory_usage) > lim) { |
2315 | + atomic64_sub(amount, &usbfs_memory_usage); |
2316 | + return -ENOMEM; |
2317 | + } |
2318 | + |
2319 | + return 0; |
2320 | } |
2321 | |
2322 | /* Memory for a transfer is being deallocated */ |
2323 | -static void usbfs_decrease_memory_usage(unsigned amount) |
2324 | +static void usbfs_decrease_memory_usage(u64 amount) |
2325 | { |
2326 | - atomic_sub(amount, &usbfs_memory_usage); |
2327 | + atomic64_sub(amount, &usbfs_memory_usage); |
2328 | } |
2329 | |
2330 | static int connected(struct usb_dev_state *ps) |
2331 | @@ -1191,7 +1187,7 @@ static int proc_bulk(struct usb_dev_state *ps, void __user *arg) |
2332 | if (!usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN))) |
2333 | return -EINVAL; |
2334 | len1 = bulk.len; |
2335 | - if (len1 >= USBFS_XFER_MAX) |
2336 | + if (len1 >= (INT_MAX - sizeof(struct urb))) |
2337 | return -EINVAL; |
2338 | ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb)); |
2339 | if (ret) |
2340 | @@ -1458,13 +1454,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb |
2341 | int number_of_packets = 0; |
2342 | unsigned int stream_id = 0; |
2343 | void *buf; |
2344 | - |
2345 | - if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP | |
2346 | - USBDEVFS_URB_SHORT_NOT_OK | |
2347 | + unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK | |
2348 | USBDEVFS_URB_BULK_CONTINUATION | |
2349 | USBDEVFS_URB_NO_FSBR | |
2350 | USBDEVFS_URB_ZERO_PACKET | |
2351 | - USBDEVFS_URB_NO_INTERRUPT)) |
2352 | + USBDEVFS_URB_NO_INTERRUPT; |
2353 | + /* USBDEVFS_URB_ISO_ASAP is a special case */ |
2354 | + if (uurb->type == USBDEVFS_URB_TYPE_ISO) |
2355 | + mask |= USBDEVFS_URB_ISO_ASAP; |
2356 | + |
2357 | + if (uurb->flags & ~mask) |
2358 | + return -EINVAL; |
2359 | + |
2360 | + if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX) |
2361 | return -EINVAL; |
2362 | if (uurb->buffer_length > 0 && !uurb->buffer) |
2363 | return -EINVAL; |
2364 | @@ -1584,10 +1586,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb |
2365 | return -EINVAL; |
2366 | } |
2367 | |
2368 | - if (uurb->buffer_length >= USBFS_XFER_MAX) { |
2369 | - ret = -EINVAL; |
2370 | - goto error; |
2371 | - } |
2372 | if (uurb->buffer_length > 0 && |
2373 | !access_ok(is_in ? VERIFY_WRITE : VERIFY_READ, |
2374 | uurb->buffer, uurb->buffer_length)) { |
2375 | diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c |
2376 | index 706b3d6a7614..d0d3f9ef9f10 100644 |
2377 | --- a/drivers/usb/core/hub.c |
2378 | +++ b/drivers/usb/core/hub.c |
2379 | @@ -4925,6 +4925,15 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, |
2380 | usb_put_dev(udev); |
2381 | if ((status == -ENOTCONN) || (status == -ENOTSUPP)) |
2382 | break; |
2383 | + |
2384 | + /* When halfway through our retry count, power-cycle the port */ |
2385 | + if (i == (SET_CONFIG_TRIES / 2) - 1) { |
2386 | + dev_info(&port_dev->dev, "attempt power cycle\n"); |
2387 | + usb_hub_set_port_power(hdev, hub, port1, false); |
2388 | + msleep(2 * hub_power_on_good_delay(hub)); |
2389 | + usb_hub_set_port_power(hdev, hub, port1, true); |
2390 | + msleep(hub_power_on_good_delay(hub)); |
2391 | + } |
2392 | } |
2393 | if (hub->hdev->parent || |
2394 | !hcd->driver->port_handed_over || |
2395 | diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c |
2396 | index 37c418e581fb..50010282c010 100644 |
2397 | --- a/drivers/usb/core/quirks.c |
2398 | +++ b/drivers/usb/core/quirks.c |
2399 | @@ -151,6 +151,9 @@ static const struct usb_device_id usb_quirk_list[] = { |
2400 | /* appletouch */ |
2401 | { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, |
2402 | |
2403 | + /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */ |
2404 | + { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM }, |
2405 | + |
2406 | /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */ |
2407 | { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM }, |
2408 | |
2409 | diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c |
2410 | index 273320fa30ae..4fce83266926 100644 |
2411 | --- a/drivers/usb/gadget/function/f_fs.c |
2412 | +++ b/drivers/usb/gadget/function/f_fs.c |
2413 | @@ -2263,7 +2263,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type, |
2414 | |
2415 | if (len < sizeof(*d) || |
2416 | d->bFirstInterfaceNumber >= ffs->interfaces_count || |
2417 | - !d->Reserved1) |
2418 | + d->Reserved1) |
2419 | return -EINVAL; |
2420 | for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) |
2421 | if (d->Reserved2[i]) |
2422 | diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c |
2423 | index 1a2614aae42c..3ff6468a1f5f 100644 |
2424 | --- a/drivers/usb/host/ehci-dbg.c |
2425 | +++ b/drivers/usb/host/ehci-dbg.c |
2426 | @@ -837,7 +837,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf) |
2427 | default: /* unknown */ |
2428 | break; |
2429 | } |
2430 | - temp = (cap >> 8) & 0xff; |
2431 | + offset = (cap >> 8) & 0xff; |
2432 | } |
2433 | } |
2434 | #endif |
2435 | diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c |
2436 | index b7114c3f52aa..a3ecd8bd5324 100644 |
2437 | --- a/drivers/usb/host/xhci-mem.c |
2438 | +++ b/drivers/usb/host/xhci-mem.c |
2439 | @@ -996,6 +996,12 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id) |
2440 | if (!vdev) |
2441 | return; |
2442 | |
2443 | + if (vdev->real_port == 0 || |
2444 | + vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) { |
2445 | + xhci_dbg(xhci, "Bad vdev->real_port.\n"); |
2446 | + goto out; |
2447 | + } |
2448 | + |
2449 | tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts); |
2450 | list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { |
2451 | /* is this a hub device that added a tt_info to the tts list */ |
2452 | @@ -1009,6 +1015,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id) |
2453 | } |
2454 | } |
2455 | } |
2456 | +out: |
2457 | /* we are now at a leaf device */ |
2458 | xhci_free_virt_device(xhci, slot_id); |
2459 | } |
2460 | diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c |
2461 | index ab5d364f6e8c..335a1ef35224 100644 |
2462 | --- a/drivers/usb/phy/phy-tahvo.c |
2463 | +++ b/drivers/usb/phy/phy-tahvo.c |
2464 | @@ -368,7 +368,8 @@ static int tahvo_usb_probe(struct platform_device *pdev) |
2465 | tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable); |
2466 | if (IS_ERR(tu->extcon)) { |
2467 | dev_err(&pdev->dev, "failed to allocate memory for extcon\n"); |
2468 | - return -ENOMEM; |
2469 | + ret = PTR_ERR(tu->extcon); |
2470 | + goto err_disable_clk; |
2471 | } |
2472 | |
2473 | ret = devm_extcon_dev_register(&pdev->dev, tu->extcon); |
2474 | diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
2475 | index db3d34c2c82e..ffa8ec917ff5 100644 |
2476 | --- a/drivers/usb/serial/option.c |
2477 | +++ b/drivers/usb/serial/option.c |
2478 | @@ -241,6 +241,7 @@ static void option_instat_callback(struct urb *urb); |
2479 | /* These Quectel products use Quectel's vendor ID */ |
2480 | #define QUECTEL_PRODUCT_EC21 0x0121 |
2481 | #define QUECTEL_PRODUCT_EC25 0x0125 |
2482 | +#define QUECTEL_PRODUCT_BG96 0x0296 |
2483 | |
2484 | #define CMOTECH_VENDOR_ID 0x16d8 |
2485 | #define CMOTECH_PRODUCT_6001 0x6001 |
2486 | @@ -1185,6 +1186,8 @@ static const struct usb_device_id option_ids[] = { |
2487 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
2488 | { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), |
2489 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
2490 | + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96), |
2491 | + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
2492 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, |
2493 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, |
2494 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), |
2495 | diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h |
2496 | index a155cd02bce2..ecc83c405a8b 100644 |
2497 | --- a/drivers/usb/storage/uas-detect.h |
2498 | +++ b/drivers/usb/storage/uas-detect.h |
2499 | @@ -111,6 +111,10 @@ static int uas_use_uas_driver(struct usb_interface *intf, |
2500 | } |
2501 | } |
2502 | |
2503 | + /* All Seagate disk enclosures have broken ATA pass-through support */ |
2504 | + if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2) |
2505 | + flags |= US_FL_NO_ATA_1X; |
2506 | + |
2507 | usb_stor_adjust_quirks(udev, &flags); |
2508 | |
2509 | if (flags & US_FL_IGNORE_UAS) { |
2510 | diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c |
2511 | index 85d3e648bdea..59b3f62a2d64 100644 |
2512 | --- a/drivers/vfio/vfio_iommu_spapr_tce.c |
2513 | +++ b/drivers/vfio/vfio_iommu_spapr_tce.c |
2514 | @@ -1123,12 +1123,11 @@ static long tce_iommu_ioctl(void *iommu_data, |
2515 | mutex_lock(&container->lock); |
2516 | |
2517 | ret = tce_iommu_create_default_window(container); |
2518 | - if (ret) |
2519 | - return ret; |
2520 | - |
2521 | - ret = tce_iommu_create_window(container, create.page_shift, |
2522 | - create.window_size, create.levels, |
2523 | - &create.start_addr); |
2524 | + if (!ret) |
2525 | + ret = tce_iommu_create_window(container, |
2526 | + create.page_shift, |
2527 | + create.window_size, create.levels, |
2528 | + &create.start_addr); |
2529 | |
2530 | mutex_unlock(&container->lock); |
2531 | |
2532 | diff --git a/fs/dax.c b/fs/dax.c |
2533 | index bf6218da7928..800748f10b3d 100644 |
2534 | --- a/fs/dax.c |
2535 | +++ b/fs/dax.c |
2536 | @@ -1265,6 +1265,17 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data, |
2537 | if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED)) |
2538 | return -EIO; |
2539 | |
2540 | + /* |
2541 | + * Write can allocate block for an area which has a hole page mapped |
2542 | + * into page tables. We have to tear down these mappings so that data |
2543 | + * written by write(2) is visible in mmap. |
2544 | + */ |
2545 | + if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) { |
2546 | + invalidate_inode_pages2_range(inode->i_mapping, |
2547 | + pos >> PAGE_SHIFT, |
2548 | + (end - 1) >> PAGE_SHIFT); |
2549 | + } |
2550 | + |
2551 | while (pos < end) { |
2552 | unsigned offset = pos & (PAGE_SIZE - 1); |
2553 | struct blk_dax_ctl dax = { 0 }; |
2554 | @@ -1329,23 +1340,6 @@ iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter, |
2555 | if (iov_iter_rw(iter) == WRITE) |
2556 | flags |= IOMAP_WRITE; |
2557 | |
2558 | - /* |
2559 | - * Yes, even DAX files can have page cache attached to them: A zeroed |
2560 | - * page is inserted into the pagecache when we have to serve a write |
2561 | - * fault on a hole. It should never be dirtied and can simply be |
2562 | - * dropped from the pagecache once we get real data for the page. |
2563 | - * |
2564 | - * XXX: This is racy against mmap, and there's nothing we can do about |
2565 | - * it. We'll eventually need to shift this down even further so that |
2566 | - * we can check if we allocated blocks over a hole first. |
2567 | - */ |
2568 | - if (mapping->nrpages) { |
2569 | - ret = invalidate_inode_pages2_range(mapping, |
2570 | - pos >> PAGE_SHIFT, |
2571 | - (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT); |
2572 | - WARN_ON_ONCE(ret); |
2573 | - } |
2574 | - |
2575 | while (iov_iter_count(iter)) { |
2576 | ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops, |
2577 | iter, iomap_dax_actor); |
2578 | diff --git a/fs/libfs.c b/fs/libfs.c |
2579 | index 48826d4da189..9588780ad43e 100644 |
2580 | --- a/fs/libfs.c |
2581 | +++ b/fs/libfs.c |
2582 | @@ -245,7 +245,8 @@ struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name, |
2583 | struct inode *root; |
2584 | struct qstr d_name = QSTR_INIT(name, strlen(name)); |
2585 | |
2586 | - s = sget(fs_type, NULL, set_anon_super, MS_NOUSER, NULL); |
2587 | + s = sget_userns(fs_type, NULL, set_anon_super, MS_KERNMOUNT|MS_NOUSER, |
2588 | + &init_user_ns, NULL); |
2589 | if (IS_ERR(s)) |
2590 | return ERR_CAST(s); |
2591 | |
2592 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
2593 | index 67845220fc27..4638654e26f3 100644 |
2594 | --- a/fs/nfs/nfs4proc.c |
2595 | +++ b/fs/nfs/nfs4proc.c |
2596 | @@ -38,7 +38,6 @@ |
2597 | #include <linux/mm.h> |
2598 | #include <linux/delay.h> |
2599 | #include <linux/errno.h> |
2600 | -#include <linux/file.h> |
2601 | #include <linux/string.h> |
2602 | #include <linux/ratelimit.h> |
2603 | #include <linux/printk.h> |
2604 | @@ -6006,7 +6005,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, |
2605 | p->server = server; |
2606 | atomic_inc(&lsp->ls_count); |
2607 | p->ctx = get_nfs_open_context(ctx); |
2608 | - get_file(fl->fl_file); |
2609 | memcpy(&p->fl, fl, sizeof(p->fl)); |
2610 | return p; |
2611 | out_free_seqid: |
2612 | @@ -6119,7 +6117,6 @@ static void nfs4_lock_release(void *calldata) |
2613 | nfs_free_seqid(data->arg.lock_seqid); |
2614 | nfs4_put_lock_state(data->lsp); |
2615 | put_nfs_open_context(data->ctx); |
2616 | - fput(data->fl.fl_file); |
2617 | kfree(data); |
2618 | dprintk("%s: done!\n", __func__); |
2619 | } |
2620 | diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c |
2621 | index 92671914067f..71deeae6eefd 100644 |
2622 | --- a/fs/nfs/nfs4state.c |
2623 | +++ b/fs/nfs/nfs4state.c |
2624 | @@ -1718,7 +1718,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error) |
2625 | break; |
2626 | case -NFS4ERR_STALE_CLIENTID: |
2627 | set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); |
2628 | - nfs4_state_clear_reclaim_reboot(clp); |
2629 | nfs4_state_start_reclaim_reboot(clp); |
2630 | break; |
2631 | case -NFS4ERR_EXPIRED: |
2632 | diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h |
2633 | index 447a915db25d..4431ea2c8802 100644 |
2634 | --- a/include/linux/buffer_head.h |
2635 | +++ b/include/linux/buffer_head.h |
2636 | @@ -239,12 +239,10 @@ static inline int block_page_mkwrite_return(int err) |
2637 | { |
2638 | if (err == 0) |
2639 | return VM_FAULT_LOCKED; |
2640 | - if (err == -EFAULT) |
2641 | + if (err == -EFAULT || err == -EAGAIN) |
2642 | return VM_FAULT_NOPAGE; |
2643 | if (err == -ENOMEM) |
2644 | return VM_FAULT_OOM; |
2645 | - if (err == -EAGAIN) |
2646 | - return VM_FAULT_RETRY; |
2647 | /* -ENOSPC, -EDQUOT, -EIO ... */ |
2648 | return VM_FAULT_SIGBUS; |
2649 | } |
2650 | diff --git a/include/linux/fence.h b/include/linux/fence.h |
2651 | index 0d763053f97a..9bb2c0c97a21 100644 |
2652 | --- a/include/linux/fence.h |
2653 | +++ b/include/linux/fence.h |
2654 | @@ -47,7 +47,7 @@ struct fence_cb; |
2655 | * can be compared to decide which fence would be signaled later. |
2656 | * @flags: A mask of FENCE_FLAG_* defined below |
2657 | * @timestamp: Timestamp when the fence was signaled. |
2658 | - * @status: Optional, only valid if < 0, must be set before calling |
2659 | + * @error: Optional, only valid if < 0, must be set before calling |
2660 | * fence_signal, indicates that the fence has completed with an error. |
2661 | * |
2662 | * the flags member must be manipulated and read using the appropriate |
2663 | @@ -79,7 +79,7 @@ struct fence { |
2664 | unsigned seqno; |
2665 | unsigned long flags; |
2666 | ktime_t timestamp; |
2667 | - int status; |
2668 | + int error; |
2669 | }; |
2670 | |
2671 | enum fence_flag_bits { |
2672 | @@ -132,7 +132,7 @@ struct fence_cb { |
2673 | * or some failure occurred that made it impossible to enable |
2674 | * signaling. True indicates successful enabling. |
2675 | * |
2676 | - * fence->status may be set in enable_signaling, but only when false is |
2677 | + * fence->error may be set in enable_signaling, but only when false is |
2678 | * returned. |
2679 | * |
2680 | * Calling fence_signal before enable_signaling is called allows |
2681 | @@ -144,7 +144,7 @@ struct fence_cb { |
2682 | * the second time will be a noop since it was already signaled. |
2683 | * |
2684 | * Notes on signaled: |
2685 | - * May set fence->status if returning true. |
2686 | + * May set fence->error if returning true. |
2687 | * |
2688 | * Notes on wait: |
2689 | * Must not be NULL, set to fence_default_wait for default implementation. |
2690 | @@ -280,6 +280,19 @@ fence_is_signaled(struct fence *fence) |
2691 | return false; |
2692 | } |
2693 | |
2694 | +/** |
2695 | + * __fence_is_later - return if f1 is chronologically later than f2 |
2696 | + * @f1: [in] the first fence's seqno |
2697 | + * @f2: [in] the second fence's seqno from the same context |
2698 | + * |
2699 | + * Returns true if f1 is chronologically later than f2. Both fences must be |
2700 | + * from the same context, since a seqno is not common across contexts. |
2701 | + */ |
2702 | +static inline bool __fence_is_later(u32 f1, u32 f2) |
2703 | +{ |
2704 | + return (int)(f1 - f2) > 0; |
2705 | +} |
2706 | + |
2707 | /** |
2708 | * fence_is_later - return if f1 is chronologically later than f2 |
2709 | * @f1: [in] the first fence from the same context |
2710 | @@ -293,7 +306,7 @@ static inline bool fence_is_later(struct fence *f1, struct fence *f2) |
2711 | if (WARN_ON(f1->context != f2->context)) |
2712 | return false; |
2713 | |
2714 | - return (int)(f1->seqno - f2->seqno) > 0; |
2715 | + return __fence_is_later(f1->seqno, f2->seqno); |
2716 | } |
2717 | |
2718 | /** |
2719 | @@ -321,6 +334,50 @@ static inline struct fence *fence_later(struct fence *f1, struct fence *f2) |
2720 | return fence_is_signaled(f2) ? NULL : f2; |
2721 | } |
2722 | |
2723 | +/** |
2724 | + * fence_get_status_locked - returns the status upon completion |
2725 | + * @fence: [in] the fence to query |
2726 | + * |
2727 | + * Drivers can supply an optional error status condition before they signal |
2728 | + * the fence (to indicate whether the fence was completed due to an error |
2729 | + * rather than success). The value of the status condition is only valid |
2730 | + * if the fence has been signaled, fence_get_status_locked() first checks |
2731 | + * the signal state before reporting the error status. |
2732 | + * |
2733 | + * Returns 0 if the fence has not yet been signaled, 1 if the fence has |
2734 | + * been signaled without an error condition, or a negative error code |
2735 | + * if the fence has been completed in err. |
2736 | + */ |
2737 | +static inline int fence_get_status_locked(struct fence *fence) |
2738 | +{ |
2739 | + if (fence_is_signaled_locked(fence)) |
2740 | + return fence->error ?: 1; |
2741 | + else |
2742 | + return 0; |
2743 | +} |
2744 | + |
2745 | +int fence_get_status(struct fence *fence); |
2746 | + |
2747 | +/** |
2748 | + * fence_set_error - flag an error condition on the fence |
2749 | + * @fence: [in] the fence |
2750 | + * @error: [in] the error to store |
2751 | + * |
2752 | + * Drivers can supply an optional error status condition before they signal |
2753 | + * the fence, to indicate that the fence was completed due to an error |
2754 | + * rather than success. This must be set before signaling (so that the value |
2755 | + * is visible before any waiters on the signal callback are woken). This |
2756 | + * helper exists to help catching erroneous setting of #fence.error. |
2757 | + */ |
2758 | +static inline void fence_set_error(struct fence *fence, |
2759 | + int error) |
2760 | +{ |
2761 | + BUG_ON(test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)); |
2762 | + BUG_ON(error >= 0 || error < -MAX_ERRNO); |
2763 | + |
2764 | + fence->error = error; |
2765 | +} |
2766 | + |
2767 | signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout); |
2768 | signed long fence_wait_any_timeout(struct fence **fences, uint32_t count, |
2769 | bool intr, signed long timeout); |
2770 | diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h |
2771 | index 4741ecdb9817..78ed8105e64d 100644 |
2772 | --- a/include/linux/perf_event.h |
2773 | +++ b/include/linux/perf_event.h |
2774 | @@ -1259,6 +1259,7 @@ extern void perf_event_disable(struct perf_event *event); |
2775 | extern void perf_event_disable_local(struct perf_event *event); |
2776 | extern void perf_event_disable_inatomic(struct perf_event *event); |
2777 | extern void perf_event_task_tick(void); |
2778 | +extern int perf_event_account_interrupt(struct perf_event *event); |
2779 | #else /* !CONFIG_PERF_EVENTS: */ |
2780 | static inline void * |
2781 | perf_aux_output_begin(struct perf_output_handle *handle, |
2782 | diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h |
2783 | index 5e64a86989a5..ab1dadba9923 100644 |
2784 | --- a/include/uapi/linux/usb/ch9.h |
2785 | +++ b/include/uapi/linux/usb/ch9.h |
2786 | @@ -854,6 +854,8 @@ struct usb_wireless_cap_descriptor { /* Ultra Wide Band */ |
2787 | __u8 bReserved; |
2788 | } __attribute__((packed)); |
2789 | |
2790 | +#define USB_DT_USB_WIRELESS_CAP_SIZE 11 |
2791 | + |
2792 | /* USB 2.0 Extension descriptor */ |
2793 | #define USB_CAP_TYPE_EXT 2 |
2794 | |
2795 | @@ -1046,6 +1048,7 @@ struct usb_ptm_cap_descriptor { |
2796 | __u8 bDevCapabilityType; |
2797 | } __attribute__((packed)); |
2798 | |
2799 | +#define USB_DT_USB_PTM_ID_SIZE 3 |
2800 | /* |
2801 | * The size of the descriptor for the Sublink Speed Attribute Count |
2802 | * (SSAC) specified in bmAttributes[4:0]. |
2803 | diff --git a/kernel/events/core.c b/kernel/events/core.c |
2804 | index 36ff2d93f222..13b9784427b0 100644 |
2805 | --- a/kernel/events/core.c |
2806 | +++ b/kernel/events/core.c |
2807 | @@ -7088,25 +7088,12 @@ static void perf_log_itrace_start(struct perf_event *event) |
2808 | perf_output_end(&handle); |
2809 | } |
2810 | |
2811 | -/* |
2812 | - * Generic event overflow handling, sampling. |
2813 | - */ |
2814 | - |
2815 | -static int __perf_event_overflow(struct perf_event *event, |
2816 | - int throttle, struct perf_sample_data *data, |
2817 | - struct pt_regs *regs) |
2818 | +static int |
2819 | +__perf_event_account_interrupt(struct perf_event *event, int throttle) |
2820 | { |
2821 | - int events = atomic_read(&event->event_limit); |
2822 | struct hw_perf_event *hwc = &event->hw; |
2823 | - u64 seq; |
2824 | int ret = 0; |
2825 | - |
2826 | - /* |
2827 | - * Non-sampling counters might still use the PMI to fold short |
2828 | - * hardware counters, ignore those. |
2829 | - */ |
2830 | - if (unlikely(!is_sampling_event(event))) |
2831 | - return 0; |
2832 | + u64 seq; |
2833 | |
2834 | seq = __this_cpu_read(perf_throttled_seq); |
2835 | if (seq != hwc->interrupts_seq) { |
2836 | @@ -7134,6 +7121,34 @@ static int __perf_event_overflow(struct perf_event *event, |
2837 | perf_adjust_period(event, delta, hwc->last_period, true); |
2838 | } |
2839 | |
2840 | + return ret; |
2841 | +} |
2842 | + |
2843 | +int perf_event_account_interrupt(struct perf_event *event) |
2844 | +{ |
2845 | + return __perf_event_account_interrupt(event, 1); |
2846 | +} |
2847 | + |
2848 | +/* |
2849 | + * Generic event overflow handling, sampling. |
2850 | + */ |
2851 | + |
2852 | +static int __perf_event_overflow(struct perf_event *event, |
2853 | + int throttle, struct perf_sample_data *data, |
2854 | + struct pt_regs *regs) |
2855 | +{ |
2856 | + int events = atomic_read(&event->event_limit); |
2857 | + int ret = 0; |
2858 | + |
2859 | + /* |
2860 | + * Non-sampling counters might still use the PMI to fold short |
2861 | + * hardware counters, ignore those. |
2862 | + */ |
2863 | + if (unlikely(!is_sampling_event(event))) |
2864 | + return 0; |
2865 | + |
2866 | + ret = __perf_event_account_interrupt(event, throttle); |
2867 | + |
2868 | /* |
2869 | * XXX event_limit might not quite work as expected on inherited |
2870 | * events |
2871 | diff --git a/mm/oom_kill.c b/mm/oom_kill.c |
2872 | index d631d251c150..4a184157cc3d 100644 |
2873 | --- a/mm/oom_kill.c |
2874 | +++ b/mm/oom_kill.c |
2875 | @@ -524,7 +524,6 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) |
2876 | */ |
2877 | set_bit(MMF_UNSTABLE, &mm->flags); |
2878 | |
2879 | - tlb_gather_mmu(&tlb, mm, 0, -1); |
2880 | for (vma = mm->mmap ; vma; vma = vma->vm_next) { |
2881 | if (is_vm_hugetlb_page(vma)) |
2882 | continue; |
2883 | @@ -546,11 +545,13 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) |
2884 | * we do not want to block exit_mmap by keeping mm ref |
2885 | * count elevated without a good reason. |
2886 | */ |
2887 | - if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) |
2888 | + if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) { |
2889 | + tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end); |
2890 | unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end, |
2891 | &details); |
2892 | + tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end); |
2893 | + } |
2894 | } |
2895 | - tlb_finish_mmu(&tlb, 0, -1); |
2896 | pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n", |
2897 | task_pid_nr(tsk), tsk->comm, |
2898 | K(get_mm_counter(mm, MM_ANONPAGES)), |
2899 | diff --git a/mm/page_alloc.c b/mm/page_alloc.c |
2900 | index ef5ee56095e8..fbc38888252b 100644 |
2901 | --- a/mm/page_alloc.c |
2902 | +++ b/mm/page_alloc.c |
2903 | @@ -2592,30 +2592,23 @@ int __isolate_free_page(struct page *page, unsigned int order) |
2904 | * Update NUMA hit/miss statistics |
2905 | * |
2906 | * Must be called with interrupts disabled. |
2907 | - * |
2908 | - * When __GFP_OTHER_NODE is set assume the node of the preferred |
2909 | - * zone is the local node. This is useful for daemons who allocate |
2910 | - * memory on behalf of other processes. |
2911 | */ |
2912 | static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, |
2913 | gfp_t flags) |
2914 | { |
2915 | #ifdef CONFIG_NUMA |
2916 | - int local_nid = numa_node_id(); |
2917 | enum zone_stat_item local_stat = NUMA_LOCAL; |
2918 | |
2919 | - if (unlikely(flags & __GFP_OTHER_NODE)) { |
2920 | + if (z->node != numa_node_id()) |
2921 | local_stat = NUMA_OTHER; |
2922 | - local_nid = preferred_zone->node; |
2923 | - } |
2924 | |
2925 | - if (z->node == local_nid) { |
2926 | + if (z->node == preferred_zone->node) |
2927 | __inc_zone_state(z, NUMA_HIT); |
2928 | - __inc_zone_state(z, local_stat); |
2929 | - } else { |
2930 | + else { |
2931 | __inc_zone_state(z, NUMA_MISS); |
2932 | __inc_zone_state(preferred_zone, NUMA_FOREIGN); |
2933 | } |
2934 | + __inc_zone_state(z, local_stat); |
2935 | #endif |
2936 | } |
2937 | |
2938 | diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c |
2939 | index 8fcd0c642742..05255a286888 100644 |
2940 | --- a/net/ipv4/tcp_input.c |
2941 | +++ b/net/ipv4/tcp_input.c |
2942 | @@ -5081,7 +5081,7 @@ static void tcp_check_space(struct sock *sk) |
2943 | if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { |
2944 | sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); |
2945 | /* pairs with tcp_poll() */ |
2946 | - smp_mb__after_atomic(); |
2947 | + smp_mb(); |
2948 | if (sk->sk_socket && |
2949 | test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) |
2950 | tcp_new_space(sk); |
2951 | diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c |
2952 | index 816f79d1a8a3..67e882d49195 100644 |
2953 | --- a/net/ipv6/ip6_vti.c |
2954 | +++ b/net/ipv6/ip6_vti.c |
2955 | @@ -189,12 +189,12 @@ static int vti6_tnl_create2(struct net_device *dev) |
2956 | struct vti6_net *ip6n = net_generic(net, vti6_net_id); |
2957 | int err; |
2958 | |
2959 | + dev->rtnl_link_ops = &vti6_link_ops; |
2960 | err = register_netdevice(dev); |
2961 | if (err < 0) |
2962 | goto out; |
2963 | |
2964 | strcpy(t->parms.name, dev->name); |
2965 | - dev->rtnl_link_ops = &vti6_link_ops; |
2966 | |
2967 | dev_hold(dev); |
2968 | vti6_tnl_link(ip6n, t); |
2969 | diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c |
2970 | index 3468d5635d0a..9d77a54e8854 100644 |
2971 | --- a/net/l2tp/l2tp_ip.c |
2972 | +++ b/net/l2tp/l2tp_ip.c |
2973 | @@ -48,7 +48,8 @@ static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk) |
2974 | return (struct l2tp_ip_sock *)sk; |
2975 | } |
2976 | |
2977 | -static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id) |
2978 | +static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr, |
2979 | + __be32 raddr, int dif, u32 tunnel_id) |
2980 | { |
2981 | struct sock *sk; |
2982 | |
2983 | @@ -62,6 +63,7 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif |
2984 | if ((l2tp->conn_id == tunnel_id) && |
2985 | net_eq(sock_net(sk), net) && |
2986 | !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && |
2987 | + (!inet->inet_daddr || !raddr || inet->inet_daddr == raddr) && |
2988 | (!sk->sk_bound_dev_if || !dif || |
2989 | sk->sk_bound_dev_if == dif)) |
2990 | goto found; |
2991 | @@ -72,15 +74,6 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif |
2992 | return sk; |
2993 | } |
2994 | |
2995 | -static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id) |
2996 | -{ |
2997 | - struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id); |
2998 | - if (sk) |
2999 | - sock_hold(sk); |
3000 | - |
3001 | - return sk; |
3002 | -} |
3003 | - |
3004 | /* When processing receive frames, there are two cases to |
3005 | * consider. Data frames consist of a non-zero session-id and an |
3006 | * optional cookie. Control frames consist of a regular L2TP header |
3007 | @@ -186,8 +179,8 @@ static int l2tp_ip_recv(struct sk_buff *skb) |
3008 | struct iphdr *iph = (struct iphdr *) skb_network_header(skb); |
3009 | |
3010 | read_lock_bh(&l2tp_ip_lock); |
3011 | - sk = __l2tp_ip_bind_lookup(net, iph->daddr, inet_iif(skb), |
3012 | - tunnel_id); |
3013 | + sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, |
3014 | + inet_iif(skb), tunnel_id); |
3015 | if (!sk) { |
3016 | read_unlock_bh(&l2tp_ip_lock); |
3017 | goto discard; |
3018 | @@ -289,7 +282,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
3019 | inet->inet_saddr = 0; /* Use device */ |
3020 | |
3021 | write_lock_bh(&l2tp_ip_lock); |
3022 | - if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, |
3023 | + if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0, |
3024 | sk->sk_bound_dev_if, addr->l2tp_conn_id)) { |
3025 | write_unlock_bh(&l2tp_ip_lock); |
3026 | ret = -EADDRINUSE; |
3027 | diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c |
3028 | index 1d522ce833e6..247097289fd0 100644 |
3029 | --- a/net/l2tp/l2tp_ip6.c |
3030 | +++ b/net/l2tp/l2tp_ip6.c |
3031 | @@ -59,12 +59,14 @@ static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk) |
3032 | |
3033 | static struct sock *__l2tp_ip6_bind_lookup(struct net *net, |
3034 | struct in6_addr *laddr, |
3035 | + const struct in6_addr *raddr, |
3036 | int dif, u32 tunnel_id) |
3037 | { |
3038 | struct sock *sk; |
3039 | |
3040 | sk_for_each_bound(sk, &l2tp_ip6_bind_table) { |
3041 | const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk); |
3042 | + const struct in6_addr *sk_raddr = &sk->sk_v6_daddr; |
3043 | struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk); |
3044 | |
3045 | if (l2tp == NULL) |
3046 | @@ -73,6 +75,7 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net, |
3047 | if ((l2tp->conn_id == tunnel_id) && |
3048 | net_eq(sock_net(sk), net) && |
3049 | (!sk_laddr || ipv6_addr_any(sk_laddr) || ipv6_addr_equal(sk_laddr, laddr)) && |
3050 | + (!raddr || ipv6_addr_any(sk_raddr) || ipv6_addr_equal(sk_raddr, raddr)) && |
3051 | (!sk->sk_bound_dev_if || !dif || |
3052 | sk->sk_bound_dev_if == dif)) |
3053 | goto found; |
3054 | @@ -83,17 +86,6 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net, |
3055 | return sk; |
3056 | } |
3057 | |
3058 | -static inline struct sock *l2tp_ip6_bind_lookup(struct net *net, |
3059 | - struct in6_addr *laddr, |
3060 | - int dif, u32 tunnel_id) |
3061 | -{ |
3062 | - struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id); |
3063 | - if (sk) |
3064 | - sock_hold(sk); |
3065 | - |
3066 | - return sk; |
3067 | -} |
3068 | - |
3069 | /* When processing receive frames, there are two cases to |
3070 | * consider. Data frames consist of a non-zero session-id and an |
3071 | * optional cookie. Control frames consist of a regular L2TP header |
3072 | @@ -200,8 +192,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb) |
3073 | struct ipv6hdr *iph = ipv6_hdr(skb); |
3074 | |
3075 | read_lock_bh(&l2tp_ip6_lock); |
3076 | - sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, inet6_iif(skb), |
3077 | - tunnel_id); |
3078 | + sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr, |
3079 | + inet6_iif(skb), tunnel_id); |
3080 | if (!sk) { |
3081 | read_unlock_bh(&l2tp_ip6_lock); |
3082 | goto discard; |
3083 | @@ -339,7 +331,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
3084 | rcu_read_unlock(); |
3085 | |
3086 | write_lock_bh(&l2tp_ip6_lock); |
3087 | - if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if, |
3088 | + if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, NULL, bound_dev_if, |
3089 | addr->l2tp_conn_id)) { |
3090 | write_unlock_bh(&l2tp_ip6_lock); |
3091 | err = -EADDRINUSE; |
3092 | diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c |
3093 | index e75cbf6ecc26..a0d901d8992e 100644 |
3094 | --- a/net/mac80211/chan.c |
3095 | +++ b/net/mac80211/chan.c |
3096 | @@ -231,9 +231,6 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata) |
3097 | !(sta->sdata->bss && sta->sdata->bss == sdata->bss)) |
3098 | continue; |
3099 | |
3100 | - if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_ASSOC)) |
3101 | - continue; |
3102 | - |
3103 | max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta)); |
3104 | } |
3105 | rcu_read_unlock(); |
3106 | diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c |
3107 | index 274c564bd9af..1ffd1e145c13 100644 |
3108 | --- a/net/mac80211/tx.c |
3109 | +++ b/net/mac80211/tx.c |
3110 | @@ -1244,7 +1244,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, |
3111 | |
3112 | static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local, |
3113 | struct ieee80211_vif *vif, |
3114 | - struct ieee80211_sta *pubsta, |
3115 | + struct sta_info *sta, |
3116 | struct sk_buff *skb) |
3117 | { |
3118 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
3119 | @@ -1258,10 +1258,13 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local, |
3120 | if (!ieee80211_is_data(hdr->frame_control)) |
3121 | return NULL; |
3122 | |
3123 | - if (pubsta) { |
3124 | + if (sta) { |
3125 | u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; |
3126 | |
3127 | - txq = pubsta->txq[tid]; |
3128 | + if (!sta->uploaded) |
3129 | + return NULL; |
3130 | + |
3131 | + txq = sta->sta.txq[tid]; |
3132 | } else if (vif) { |
3133 | txq = vif->txq; |
3134 | } |
3135 | @@ -1499,23 +1502,17 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local, |
3136 | struct fq *fq = &local->fq; |
3137 | struct ieee80211_vif *vif; |
3138 | struct txq_info *txqi; |
3139 | - struct ieee80211_sta *pubsta; |
3140 | |
3141 | if (!local->ops->wake_tx_queue || |
3142 | sdata->vif.type == NL80211_IFTYPE_MONITOR) |
3143 | return false; |
3144 | |
3145 | - if (sta && sta->uploaded) |
3146 | - pubsta = &sta->sta; |
3147 | - else |
3148 | - pubsta = NULL; |
3149 | - |
3150 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) |
3151 | sdata = container_of(sdata->bss, |
3152 | struct ieee80211_sub_if_data, u.ap); |
3153 | |
3154 | vif = &sdata->vif; |
3155 | - txqi = ieee80211_get_txq(local, vif, pubsta, skb); |
3156 | + txqi = ieee80211_get_txq(local, vif, sta, skb); |
3157 | |
3158 | if (!txqi) |
3159 | return false; |
3160 | diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c |
3161 | index c985ecbe9bd6..ae5ac175b2be 100644 |
3162 | --- a/net/qrtr/qrtr.c |
3163 | +++ b/net/qrtr/qrtr.c |
3164 | @@ -252,7 +252,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node, |
3165 | const int pkt_len = 20; |
3166 | struct qrtr_hdr *hdr; |
3167 | struct sk_buff *skb; |
3168 | - u32 *buf; |
3169 | + __le32 *buf; |
3170 | |
3171 | skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL); |
3172 | if (!skb) |
3173 | @@ -269,7 +269,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node, |
3174 | hdr->dst_node_id = cpu_to_le32(dst_node); |
3175 | hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL); |
3176 | |
3177 | - buf = (u32 *)skb_put(skb, pkt_len); |
3178 | + buf = (__le32 *)skb_put(skb, pkt_len); |
3179 | memset(buf, 0, pkt_len); |
3180 | buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX); |
3181 | buf[1] = cpu_to_le32(src_node); |
3182 | diff --git a/net/sctp/debug.c b/net/sctp/debug.c |
3183 | index 95d7b15dad21..e371a0d90068 100644 |
3184 | --- a/net/sctp/debug.c |
3185 | +++ b/net/sctp/debug.c |
3186 | @@ -166,7 +166,7 @@ static const char *const sctp_timer_tbl[] = { |
3187 | /* Lookup timer debug name. */ |
3188 | const char *sctp_tname(const sctp_subtype_t id) |
3189 | { |
3190 | - if (id.timeout <= SCTP_EVENT_TIMEOUT_MAX) |
3191 | + if (id.timeout < ARRAY_SIZE(sctp_timer_tbl)) |
3192 | return sctp_timer_tbl[id.timeout]; |
3193 | return "unknown_timer"; |
3194 | } |
3195 | diff --git a/net/tipc/server.c b/net/tipc/server.c |
3196 | index f89c0c2e8c16..3cd6402e812c 100644 |
3197 | --- a/net/tipc/server.c |
3198 | +++ b/net/tipc/server.c |
3199 | @@ -86,7 +86,6 @@ struct outqueue_entry { |
3200 | static void tipc_recv_work(struct work_struct *work); |
3201 | static void tipc_send_work(struct work_struct *work); |
3202 | static void tipc_clean_outqueues(struct tipc_conn *con); |
3203 | -static void tipc_sock_release(struct tipc_conn *con); |
3204 | |
3205 | static void tipc_conn_kref_release(struct kref *kref) |
3206 | { |
3207 | @@ -104,7 +103,6 @@ static void tipc_conn_kref_release(struct kref *kref) |
3208 | } |
3209 | saddr->scope = -TIPC_NODE_SCOPE; |
3210 | kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr)); |
3211 | - tipc_sock_release(con); |
3212 | sock_release(sock); |
3213 | con->sock = NULL; |
3214 | |
3215 | @@ -194,19 +192,15 @@ static void tipc_unregister_callbacks(struct tipc_conn *con) |
3216 | write_unlock_bh(&sk->sk_callback_lock); |
3217 | } |
3218 | |
3219 | -static void tipc_sock_release(struct tipc_conn *con) |
3220 | +static void tipc_close_conn(struct tipc_conn *con) |
3221 | { |
3222 | struct tipc_server *s = con->server; |
3223 | |
3224 | - if (con->conid) |
3225 | - s->tipc_conn_release(con->conid, con->usr_data); |
3226 | - |
3227 | - tipc_unregister_callbacks(con); |
3228 | -} |
3229 | - |
3230 | -static void tipc_close_conn(struct tipc_conn *con) |
3231 | -{ |
3232 | if (test_and_clear_bit(CF_CONNECTED, &con->flags)) { |
3233 | + tipc_unregister_callbacks(con); |
3234 | + |
3235 | + if (con->conid) |
3236 | + s->tipc_conn_release(con->conid, con->usr_data); |
3237 | |
3238 | /* We shouldn't flush pending works as we may be in the |
3239 | * thread. In fact the races with pending rx/tx work structs |
3240 | @@ -625,14 +619,12 @@ int tipc_server_start(struct tipc_server *s) |
3241 | void tipc_server_stop(struct tipc_server *s) |
3242 | { |
3243 | struct tipc_conn *con; |
3244 | - int total = 0; |
3245 | int id; |
3246 | |
3247 | spin_lock_bh(&s->idr_lock); |
3248 | - for (id = 0; total < s->idr_in_use; id++) { |
3249 | + for (id = 0; s->idr_in_use; id++) { |
3250 | con = idr_find(&s->conn_idr, id); |
3251 | if (con) { |
3252 | - total++; |
3253 | spin_unlock_bh(&s->idr_lock); |
3254 | tipc_close_conn(con); |
3255 | spin_lock_bh(&s->idr_lock); |
3256 | diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c |
3257 | index 0e8762945e79..2b3def14b4fb 100644 |
3258 | --- a/security/integrity/ima/ima_main.c |
3259 | +++ b/security/integrity/ima/ima_main.c |
3260 | @@ -51,6 +51,8 @@ static int __init hash_setup(char *str) |
3261 | ima_hash_algo = HASH_ALGO_SHA1; |
3262 | else if (strncmp(str, "md5", 3) == 0) |
3263 | ima_hash_algo = HASH_ALGO_MD5; |
3264 | + else |
3265 | + return 1; |
3266 | goto out; |
3267 | } |
3268 | |
3269 | @@ -60,6 +62,8 @@ static int __init hash_setup(char *str) |
3270 | break; |
3271 | } |
3272 | } |
3273 | + if (i == HASH_ALGO__LAST) |
3274 | + return 1; |
3275 | out: |
3276 | hash_setup_done = 1; |
3277 | return 1; |
3278 | diff --git a/tools/include/linux/poison.h b/tools/include/linux/poison.h |
3279 | index 51334edec506..f306a7642509 100644 |
3280 | --- a/tools/include/linux/poison.h |
3281 | +++ b/tools/include/linux/poison.h |
3282 | @@ -14,6 +14,10 @@ |
3283 | # define POISON_POINTER_DELTA 0 |
3284 | #endif |
3285 | |
3286 | +#ifdef __cplusplus |
3287 | +#define LIST_POISON1 NULL |
3288 | +#define LIST_POISON2 NULL |
3289 | +#else |
3290 | /* |
3291 | * These are non-NULL pointers that will result in page faults |
3292 | * under normal circumstances, used to verify that nobody uses |
3293 | @@ -21,6 +25,7 @@ |
3294 | */ |
3295 | #define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA) |
3296 | #define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA) |
3297 | +#endif |
3298 | |
3299 | /********** include/linux/timer.h **********/ |
3300 | /* |
3301 | diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c |
3302 | index 28d1605b0338..b60a6fd66517 100644 |
3303 | --- a/tools/perf/tests/attr.c |
3304 | +++ b/tools/perf/tests/attr.c |
3305 | @@ -150,7 +150,7 @@ static int run_dir(const char *d, const char *perf) |
3306 | snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s", |
3307 | d, d, perf, vcnt, v); |
3308 | |
3309 | - return system(cmd); |
3310 | + return system(cmd) ? TEST_FAIL : TEST_OK; |
3311 | } |
3312 | |
3313 | int test__attr(int subtest __maybe_unused) |
3314 | diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c |
3315 | index e717fed80219..f936a3cd3e35 100644 |
3316 | --- a/tools/testing/selftests/x86/ldt_gdt.c |
3317 | +++ b/tools/testing/selftests/x86/ldt_gdt.c |
3318 | @@ -360,9 +360,24 @@ static void do_simple_tests(void) |
3319 | install_invalid(&desc, false); |
3320 | |
3321 | desc.seg_not_present = 0; |
3322 | - desc.read_exec_only = 0; |
3323 | desc.seg_32bit = 1; |
3324 | + desc.read_exec_only = 0; |
3325 | + desc.limit = 0xfffff; |
3326 | + |
3327 | install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB); |
3328 | + |
3329 | + desc.limit_in_pages = 1; |
3330 | + |
3331 | + install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB | AR_G); |
3332 | + desc.read_exec_only = 1; |
3333 | + install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S | AR_P | AR_DB | AR_G); |
3334 | + desc.contents = 1; |
3335 | + desc.read_exec_only = 0; |
3336 | + install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G); |
3337 | + desc.read_exec_only = 1; |
3338 | + install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA_EXPDOWN | AR_S | AR_P | AR_DB | AR_G); |
3339 | + |
3340 | + desc.limit = 0; |
3341 | install_invalid(&desc, true); |
3342 | } |
3343 | |
3344 | diff --git a/tools/usb/usbip/Makefile.am b/tools/usb/usbip/Makefile.am |
3345 | index 66f8bf038c9f..45eaa70a71e0 100644 |
3346 | --- a/tools/usb/usbip/Makefile.am |
3347 | +++ b/tools/usb/usbip/Makefile.am |
3348 | @@ -1,6 +1,7 @@ |
3349 | SUBDIRS := libsrc src |
3350 | includedir = @includedir@/usbip |
3351 | include_HEADERS := $(addprefix libsrc/, \ |
3352 | - usbip_common.h vhci_driver.h usbip_host_driver.h) |
3353 | + usbip_common.h vhci_driver.h usbip_host_driver.h \ |
3354 | + list.h sysfs_utils.h usbip_host_common.h) |
3355 | |
3356 | dist_man_MANS := $(addprefix doc/, usbip.8 usbipd.8) |
3357 | diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c |
3358 | index 27a1f6341d41..7b49a1378c90 100644 |
3359 | --- a/virt/kvm/arm/arch_timer.c |
3360 | +++ b/virt/kvm/arm/arch_timer.c |
3361 | @@ -89,9 +89,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work) |
3362 | struct kvm_vcpu *vcpu; |
3363 | |
3364 | vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired); |
3365 | - vcpu->arch.timer_cpu.armed = false; |
3366 | - |
3367 | - WARN_ON(!kvm_timer_should_fire(vcpu)); |
3368 | |
3369 | /* |
3370 | * If the vcpu is blocked we want to wake it up so that it will see |